e2e/storage: speed up skipping, simplify APIs and test definition
CreateDriver (now called PrepareTest) is a potentially expensive operation, depending on the driver. Creating and tearing down a framework instance also takes time (measured at 6 seconds on a fast machine) and produces quite a bit of log output. Both can be avoided for tests that skip based on static information (for instance the current OS, vendor, driver and test pattern) by making the test suite responsible for creating framework and driver.

The lifecycle of the TestConfig instance was confusing because it was stored inside the DriverInfo, a struct which conceptually is static, while the TestConfig is dynamic. It is cleaner to separate the two, even if that means that an additional pointer must be passed into some functions. Now PrepareTest is responsible for initializing the PerTestConfig that is to be used by the test.

To make this approach simpler to implement (= fewer functions which need the pointer) and the tests easier to read, the entire setup and test definition is now contained in a single function. This is how it is normally done in Ginkgo, and it is easier to read because one can see at a glance where variables are set, instead of having to trace values through two additional structs (TestResource and TestInput).

Because we are changing the API already, other changes are made at the same time:

- some function prototypes get simplified
- the naming of functions is changed to match their purpose (tests aren't executed by the test suite, they only get defined for later execution)
- unused methods get removed (TestSuite.skipUnsupportedTest is redundant)
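For illustration only — this sketch is not part of the commit, and the "Example" suite name and "example" namespace prefix are made up — a test written against the reworked API looks roughly like this. Driver construction is static and cheap, so skip decisions can run before any cluster work, and all expensive setup is deferred into PrepareTest, which returns the dynamic PerTestConfig together with a cleanup callback:

```go
package storage

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

var _ = utils.SIGDescribe("Example", func() {
	// Constructing the driver is purely static, so skip decisions
	// (OS, vendor, driver, test pattern) need neither a framework
	// instance nor a deployed driver.
	driver := drivers.InitHostPathCSIDriver()

	Context(testsuites.GetDriverNameWithFeatureTags(driver), func() {
		f := framework.NewDefaultFramework("example")

		It("should work with the freshly deployed driver", func() {
			// PrepareTest deploys the driver and hands back the dynamic
			// per-test state plus a cleanup callback; statically skipped
			// tests never reach this point, so they pay no setup cost.
			config, testCleanup := driver.PrepareTest(f)
			defer testCleanup()

			framework.Logf("using driver %q on node %q",
				config.GetUniqueDriverName(), config.ClientNodeName)
		})
	})
})
```

Note how the test no longer resets a shared TestConfig in BeforeEach: the PerTestConfig is created fresh by PrepareTest for each test.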
--- a/test/e2e/storage/csi_volumes.go
+++ b/test/e2e/storage/csi_volumes.go
@@ -17,10 +17,8 @@ limitations under the License.
 package storage
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
-	"regexp"
 	"strings"
 	"time"
 
@@ -32,7 +30,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
 	"k8s.io/kubernetes/test/e2e/framework"
-	"k8s.io/kubernetes/test/e2e/framework/podlogs"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -47,9 +44,9 @@ import (
 )
 
 // List of testDrivers to be executed in below loop
-var csiTestDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
+var csiTestDrivers = []func() testsuites.TestDriver{
 	drivers.InitHostPathCSIDriver,
-	drivers.InitGcePDCSIDriver,
+	func() testsuites.TestDriver { return drivers.InitGcePDCSIDriver(false /* topology enabled */) },
 	drivers.InitGcePDExternalCSIDriver,
 	drivers.InitHostPathV0CSIDriver,
 	// Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
@@ -81,117 +78,56 @@ func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
 
 // This executes testSuites for csi volumes.
 var _ = utils.SIGDescribe("CSI Volumes", func() {
-	f := framework.NewDefaultFramework("csi-volumes")
-
-	var (
-		cancel context.CancelFunc
-		cs     clientset.Interface
-		csics  csiclient.Interface
-		ns     *v1.Namespace
-		// Common configuration options for each driver.
-		config = testsuites.TestConfig{
-			Framework: f,
-			Prefix:    "csi",
-		}
-	)
-
-	BeforeEach(func() {
-		ctx, c := context.WithCancel(context.Background())
-		cancel = c
-		cs = f.ClientSet
-		csics = f.CSIClientSet
-		ns = f.Namespace
-
-		// Debugging of the following tests heavily depends on the log output
-		// of the different containers. Therefore include all of that in log
-		// files (when using --report-dir, as in the CI) or the output stream
-		// (otherwise).
-		to := podlogs.LogOutput{
-			StatusWriter: GinkgoWriter,
-		}
-		if framework.TestContext.ReportDir == "" {
-			to.LogWriter = GinkgoWriter
-		} else {
-			test := CurrentGinkgoTestDescription()
-			reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
-			// We end the prefix with a slash to ensure that all logs
-			// end up in a directory named after the current test.
-			to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
-				reg.ReplaceAllString(test.FullTestText, "_") + "/"
-		}
-		podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
-
-		// pod events are something that the framework already collects itself
-		// after a failed test. Logging them live is only useful for interactive
-		// debugging, not when we collect reports.
-		if framework.TestContext.ReportDir == "" {
-			podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
-		}
-	})
-
-	AfterEach(func() {
-		cancel()
-	})
-
 	for _, initDriver := range csiTestDrivers {
-		curDriver := initDriver(config)
-		curConfig := curDriver.GetDriverInfo().Config
+		curDriver := initDriver()
 		Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-			BeforeEach(func() {
-				// Reset config. The driver might have modified its copy
-				// in a previous test.
-				curDriver.GetDriverInfo().Config = curConfig
-
-				// setupDriver
-				curDriver.CreateDriver()
-			})
-
-			AfterEach(func() {
-				// Cleanup driver
-				curDriver.CleanupDriver()
-			})
-
-			testsuites.RunTestSuite(f, curDriver, csiTestSuites, csiTunePattern)
+			testsuites.DefineTestSuite(curDriver, csiTestSuites, csiTunePattern)
 		})
 	}
 
 	Context("CSI Topology test using GCE PD driver [Feature:CSINodeInfo]", func() {
-		newConfig := config
-		newConfig.TopologyEnabled = true
-		driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
+		f := framework.NewDefaultFramework("csitopology")
+		driver := drivers.InitGcePDCSIDriver(true /* topology enabled */).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite.
+		var (
+			config      *testsuites.PerTestConfig
+			testCleanup func()
+		)
 		BeforeEach(func() {
-			driver.CreateDriver()
+			config, testCleanup = driver.PrepareTest(f)
 		})
 
 		AfterEach(func() {
-			driver.CleanupDriver()
+			if testCleanup != nil {
+				testCleanup()
+			}
 		})
 
 		It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() {
 			suffix := "topology-positive"
-			testTopologyPositive(cs, suffix, ns.GetName(), false /* delayBinding */, true /* allowedTopologies */)
+			testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */)
		})
 
 		It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() {
 			suffix := "delayed"
-			testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, false /* allowedTopologies */)
+			testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */)
 		})
 
 		It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() {
 			suffix := "delayed-topology-positive"
-			testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, true /* allowedTopologies */)
+			testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */)
 		})
 
 		It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() {
-			framework.SkipUnlessMultizone(cs)
+			framework.SkipUnlessMultizone(config.Framework.ClientSet)
 			suffix := "topology-negative"
-			testTopologyNegative(cs, suffix, ns.GetName(), false /* delayBinding */)
+			testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */)
 		})
 
 		It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() {
-			framework.SkipUnlessMultizone(cs)
+			framework.SkipUnlessMultizone(config.Framework.ClientSet)
 			suffix := "delayed-topology-negative"
-			testTopologyNegative(cs, suffix, ns.GetName(), true /* delayBinding */)
+			testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
 		})
 	})
 
@@ -227,29 +163,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 
 	for _, t := range tests {
 		test := t
-		It(test.name, func() {
-			By("Deploying mock CSI driver")
-			config := testsuites.TestConfig{
-				Framework: f,
-				Prefix:    "csi-attach",
-			}
+		f := framework.NewDefaultFramework("csiattach")
 
-			driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, test.driverAttachable, nil)
-			driver.CreateDriver()
-			defer driver.CleanupDriver()
+		It(test.name, func() {
+			cs := f.ClientSet
+			csics := f.CSIClientSet
+			ns := f.Namespace
+
+			driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil)
+			config, testCleanup := driver.PrepareTest(f)
+			driverName := config.GetUniqueDriverName()
+			defer testCleanup()
 
 			if test.deployDriverCRD {
-				err = waitForCSIDriver(csics, driver)
+				err = waitForCSIDriver(csics, driverName)
 				framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
-				defer destroyCSIDriver(csics, driver)
+				defer destroyCSIDriver(csics, driverName)
 			}
 
 			By("Creating pod")
 			var sc *storagev1.StorageClass
 			if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
-				sc = dDriver.GetDynamicProvisionStorageClass("")
+				sc = dDriver.GetDynamicProvisionStorageClass(config, "")
 			}
-			nodeName := driver.GetDriverInfo().Config.ClientNodeName
+			nodeName := config.ClientNodeName
 			scTest := testsuites.StorageClassTest{
 				Name:        driver.GetDriverInfo().Name,
 				Provisioner: sc.Provisioner,
@@ -347,29 +284,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
 	}
 	for _, t := range tests {
 		test := t
-		It(test.name, func() {
-			By("Deploying mock CSI driver")
-			config := testsuites.TestConfig{
-				Framework: f,
-				Prefix:    "csi-workload",
-			}
+		f := framework.NewDefaultFramework("csiworkload")
 
-			driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, true, test.podInfoOnMountVersion)
-			driver.CreateDriver()
-			defer driver.CleanupDriver()
+		It(test.name, func() {
+			cs := f.ClientSet
+			csics := f.CSIClientSet
+			ns := f.Namespace
+
+			driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion)
+			config, testCleanup := driver.PrepareTest(f)
+			driverName := config.GetUniqueDriverName()
+			defer testCleanup()
 
 			if test.deployDriverCRD {
-				err = waitForCSIDriver(csics, driver)
+				err = waitForCSIDriver(csics, driverName)
 				framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
-				defer destroyCSIDriver(csics, driver)
+				defer destroyCSIDriver(csics, driverName)
 			}
 
 			By("Creating pod")
 			var sc *storagev1.StorageClass
 			if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
-				sc = dDriver.GetDynamicProvisionStorageClass("")
+				sc = dDriver.GetDynamicProvisionStorageClass(config, "")
 			}
-			nodeName := driver.GetDriverInfo().Config.ClientNodeName
+			nodeName := config.ClientNodeName
 			scTest := testsuites.StorageClassTest{
 				Name:       driver.GetDriverInfo().Name,
 				Parameters: sc.Parameters,
@@ -420,14 +358,16 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, delayBinding, allowedTopologies bool) {
 		topoZone := getRandomClusterZone(cs)
 		addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone)
 	}
-	claim := newClaim(test, namespace, suffix)
-	claim.Spec.StorageClassName = &class.Name
+	test.Client = cs
+	test.Claim = newClaim(test, namespace, suffix)
+	test.Claim.Spec.StorageClassName = &class.Name
+	test.Class = class
 
 	if delayBinding {
-		_, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */)
+		_, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */)
 		Expect(node).ToNot(BeNil(), "Unexpected nil node found")
 	} else {
-		testsuites.TestDynamicProvisioning(test, cs, claim, class)
+		test.TestDynamicProvisioning()
 	}
 }
 
@@ -447,12 +387,13 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, delayBinding bool) {
 	test.DelayBinding = delayBinding
 	nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone}
 
-	class := newStorageClass(test, namespace, suffix)
-	addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone)
-	claim := newClaim(test, namespace, suffix)
-	claim.Spec.StorageClassName = &class.Name
+	test.Client = cs
+	test.Class = newStorageClass(test, namespace, suffix)
+	addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone)
+	test.Claim = newClaim(test, namespace, suffix)
+	test.Claim.Spec.StorageClassName = &test.Class.Name
 	if delayBinding {
-		testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */)
+		test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
 	} else {
 		test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 			// Ensure that a pod cannot be scheduled in an unsuitable zone.
@@ -461,13 +402,12 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, delayBinding bool) {
 			defer testsuites.StopPod(cs, pod)
 			framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable")
 		}
-		testsuites.TestDynamicProvisioning(test, cs, claim, class)
+		test.TestDynamicProvisioning()
 	}
 }
 
-func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) error {
+func waitForCSIDriver(csics csiclient.Interface, driverName string) error {
 	timeout := 2 * time.Minute
-	driverName := testsuites.GetUniqueDriverName(driver)
 
 	framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
@@ -479,8 +419,7 @@ func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) error {
 	return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName)
 }
 
-func destroyCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) {
-	driverName := testsuites.GetUniqueDriverName(driver)
+func destroyCSIDriver(csics csiclient.Interface, driverName string) {
 	driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{})
 	if err == nil {
 		framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -56,12 +56,11 @@ const (
 
 // hostpathCSI
 type hostpathCSIDriver struct {
-	cleanup    func()
 	driverInfo testsuites.DriverInfo
 	manifests  []string
 }
 
-func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
+func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
 	return &hostpathCSIDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: name,
@@ -71,7 +70,6 @@ func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
 				"", // Default fsType
 			),
 			Capabilities: capabilities,
-			Config:       config,
 		},
 		manifests: manifests,
 	}
@@ -82,8 +80,8 @@ var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
 var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
 
 // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
-func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
-	return initHostPathCSIDriver("csi-hostpath", config,
+func InitHostPathCSIDriver() testsuites.TestDriver {
+	return initHostPathCSIDriver("csi-hostpath",
 		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -104,19 +102,19 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }
 
-func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	provisioner := testsuites.GetUniqueDriverName(h)
+func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	provisioner := config.GetUniqueDriverName()
 	parameters := map[string]string{}
-	ns := h.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", provisioner)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
 }
 
-func (h *hostpathCSIDriver) GetSnapshotClass() *unstructured.Unstructured {
-	snapshotter := testsuites.GetUniqueDriverName(h)
+func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
+	snapshotter := config.GetUniqueDriverName()
 	parameters := map[string]string{}
-	ns := h.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-vsc", snapshotter)
 
 	return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
@@ -126,57 +124,60 @@ func (h *hostpathCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (h *hostpathCSIDriver) CreateDriver() {
+func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
-	f := h.driverInfo.Config.Framework
+	cancelLogging := testsuites.StartPodLogs(f)
 	cs := f.ClientSet
 
 	// The hostpath CSI driver only works when everything runs on the same node.
 	nodes := framework.GetReadySchedulableNodesOrDie(cs)
 	nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name
-	h.driverInfo.Config.ClientNodeName = nodeName
+	config := &testsuites.PerTestConfig{
+		Driver:         h,
+		Prefix:         "hostpath",
+		Framework:      f,
+		ClientNodeName: nodeName,
+	}
 
 	// TODO (?): the storage.csi.image.version and storage.csi.image.registry
 	// settings are ignored for this test. We could patch the image definitions.
 	o := utils.PatchCSIOptions{
 		OldDriverName:            h.driverInfo.Name,
-		NewDriverName:            testsuites.GetUniqueDriverName(h),
+		NewDriverName:            config.GetUniqueDriverName(),
 		DriverContainerName:      "hostpath",
-		DriverContainerArguments: []string{"--drivername=csi-hostpath-" + f.UniqueName},
+		DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()},
 		ProvisionerContainerName: "csi-provisioner",
 		SnapshotterContainerName: "csi-snapshotter",
 		NodeName:                 nodeName,
 	}
-	cleanup, err := h.driverInfo.Config.Framework.CreateFromManifests(func(item interface{}) error {
-		return utils.PatchCSIDeployment(h.driverInfo.Config.Framework, o, item)
+	cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error {
+		return utils.PatchCSIDeployment(config.Framework, o, item)
 	},
 		h.manifests...)
-	h.cleanup = cleanup
 	if err != nil {
 		framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
 	}
-}
 
-func (h *hostpathCSIDriver) CleanupDriver() {
-	if h.cleanup != nil {
+	return config, func() {
 		By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
-		h.cleanup()
+		cleanup()
+		cancelLogging()
 	}
 }
 
 // mockCSI
 type mockCSIDriver struct {
-	cleanup        func()
 	driverInfo     testsuites.DriverInfo
 	manifests      []string
 	podInfoVersion *string
+	attachable     bool
 }
 
 var _ testsuites.TestDriver = &mockCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
 
 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
-func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
+func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
 	driverManifests := []string{
 		"test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -187,16 +188,12 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
 		"test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml",
 	}
 
-	config.ServerConfig = &framework.VolumeTestConfig{}
-
 	if registerDriver {
 		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml")
 	}
 
 	if driverAttachable {
 		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml")
-	} else {
-		config.ServerConfig.ServerArgs = append(config.ServerConfig.ServerArgs, "--disable-attach")
 	}
 
 	return &mockCSIDriver{
@@ -212,10 +209,10 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver {
 				testsuites.CapFsGroup: false,
 				testsuites.CapExec:    false,
 			},
-			Config: config,
 		},
 		manifests:      driverManifests,
 		podInfoVersion: podInfoVersion,
+		attachable:     driverAttachable,
 	}
 }
 
@@ -226,10 +223,10 @@ func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 }
 
-func (m *mockCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	provisioner := testsuites.GetUniqueDriverName(m)
+func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	provisioner := config.GetUniqueDriverName()
 	parameters := map[string]string{}
-	ns := m.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", provisioner)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -239,20 +236,24 @@ func (m *mockCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (m *mockCSIDriver) CreateDriver() {
+func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	By("deploying csi mock driver")
-	f := m.driverInfo.Config.Framework
+	cancelLogging := testsuites.StartPodLogs(f)
 	cs := f.ClientSet
 
 	// pods should be scheduled on the node
 	nodes := framework.GetReadySchedulableNodesOrDie(cs)
 	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	m.driverInfo.Config.ClientNodeName = node.Name
+	config := &testsuites.PerTestConfig{
+		Driver:         m,
+		Prefix:         "mock",
+		Framework:      f,
+		ClientNodeName: node.Name,
+	}
 
 	containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
-	if m.driverInfo.Config.ServerConfig != nil && m.driverInfo.Config.ServerConfig.ServerArgs != nil {
-		containerArgs = append(containerArgs, m.driverInfo.Config.ServerConfig.ServerArgs...)
+	if !m.attachable {
+		containerArgs = append(containerArgs, "--disable-attach")
 	}
 
 	// TODO (?): the storage.csi.image.version and storage.csi.image.registry
@@ -264,29 +265,27 @@ func (m *mockCSIDriver) CreateDriver() {
 		DriverContainerArguments:      containerArgs,
 		ProvisionerContainerName:      "csi-provisioner",
 		ClusterRegistrarContainerName: "csi-cluster-driver-registrar",
-		NodeName:                      m.driverInfo.Config.ClientNodeName,
+		NodeName:                      config.ClientNodeName,
 		PodInfoVersion:                m.podInfoVersion,
 	}
 	cleanup, err := f.CreateFromManifests(func(item interface{}) error {
 		return utils.PatchCSIDeployment(f, o, item)
 	},
 		m.manifests...)
-	m.cleanup = cleanup
 	if err != nil {
 		framework.Failf("deploying csi mock driver: %v", err)
 	}
-}
 
-func (m *mockCSIDriver) CleanupDriver() {
-	if m.cleanup != nil {
+	return config, func() {
 		By("uninstalling csi mock driver")
-		m.cleanup()
+		cleanup()
+		cancelLogging()
 	}
 }
 
 // InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
-func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
-	return initHostPathCSIDriver("csi-hostpath-v0", config,
+func InitHostPathV0CSIDriver() testsuites.TestDriver {
+	return initHostPathCSIDriver("csi-hostpath-v0",
 		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -300,16 +299,17 @@ func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 
 // gce-pd
 type gcePDCSIDriver struct {
-	cleanup    func()
+	topologyEnabled bool
 	driverInfo testsuites.DriverInfo
 }
 
 var _ testsuites.TestDriver = &gcePDCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
 
 // InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
-func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitGcePDCSIDriver(topologyEnabled bool) testsuites.TestDriver {
 	return &gcePDCSIDriver{
+		topologyEnabled: topologyEnabled,
 		driverInfo: testsuites.DriverInfo{
 			Name:       GCEPDCSIProvisionerName,
 			FeatureTag: "[Serial]",
@@ -327,8 +327,6 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapExec:      true,
 				testsuites.CapMultiPODs: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -338,21 +336,14 @@ func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 }
 
 func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
-	f := g.driverInfo.Config.Framework
 	framework.SkipUnlessProviderIs("gce", "gke")
-	if !g.driverInfo.Config.TopologyEnabled {
-		// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
-		// scheduled in a different zone from the provisioned volume, causing basic provisioning
-		// tests to fail.
-		framework.SkipIfMultizone(f.ClientSet)
-	}
 	if pattern.FsType == "xfs" {
 		framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
 	}
 }
 
-func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	ns := g.driverInfo.Config.Framework.Namespace.Name
+func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	ns := config.Framework.Namespace.Name
 	provisioner := g.driverInfo.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
 
@@ -368,8 +359,16 @@ func (g *gcePDCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (g *gcePDCSIDriver) CreateDriver() {
+func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	if !g.topologyEnabled {
+		// Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be
+		// scheduled in a different zone from the provisioned volume, causing basic provisioning
+		// tests to fail.
+		framework.SkipIfMultizone(f.ClientSet)
+	}
+
 	By("deploying csi gce-pd driver")
+	cancelLogging := testsuites.StartPodLogs(f)
 	// It would be safer to rename the gcePD driver, but that
 	// hasn't been done before either and attempts to do so now led to
 	// errors during driver registration, therefore it is disabled
@@ -382,7 +381,7 @@ func (g *gcePDCSIDriver) CreateDriver() {
 	// DriverContainerName: "gce-driver",
 	// ProvisionerContainerName: "csi-external-provisioner",
 	// }
-	createGCESecrets(g.driverInfo.Config.Framework.ClientSet, g.driverInfo.Config.Framework.Namespace.Name)
+	createGCESecrets(f.ClientSet, f.Namespace.Name)
 
 	manifests := []string{
 		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
@@ -392,23 +391,25 @@ func (g *gcePDCSIDriver) CreateDriver() {
 		"test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
 	}
 
-	if g.driverInfo.Config.TopologyEnabled {
+	if g.topologyEnabled {
 		manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss_alpha.yaml")
 	} else {
 		manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml")
 	}
-	cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil, manifests...)
-	g.cleanup = cleanup
+	cleanup, err := f.CreateFromManifests(nil, manifests...)
 	if err != nil {
 		framework.Failf("deploying csi gce-pd driver: %v", err)
 	}
-}
 
-func (g *gcePDCSIDriver) CleanupDriver() {
-	By("uninstalling gce-pd driver")
-	if g.cleanup != nil {
-		g.cleanup()
-	}
+	return &testsuites.PerTestConfig{
+		Driver:    g,
+		Prefix:    "gcepd",
+		Framework: f,
+	}, func() {
+		By("uninstalling gce-pd driver")
+		cleanup()
+		cancelLogging()
+	}
 }
 
 // gcePd-external
@@ -420,7 +421,7 @@ var _ testsuites.TestDriver = &gcePDExternalCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &gcePDExternalCSIDriver{}
 
 // InitGcePDExternalCSIDriver returns gcePDExternalCSIDriver that implements TestDriver interface
-func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitGcePDExternalCSIDriver() testsuites.TestDriver {
 	return &gcePDExternalCSIDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: GCEPDCSIProvisionerName,
@@ -440,8 +441,6 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapExec:      true,
 				testsuites.CapMultiPODs: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -452,14 +451,13 @@ func (g *gcePDExternalCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 
 func (g *gcePDExternalCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	framework.SkipUnlessProviderIs("gce", "gke")
-	framework.SkipIfMultizone(g.driverInfo.Config.Framework.ClientSet)
 	if pattern.FsType == "xfs" {
 		framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
 	}
 }
 
-func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
-	ns := g.driverInfo.Config.Framework.Namespace.Name
+func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
+	ns := config.Framework.Namespace.Name
 	provisioner := g.driverInfo.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
 
@@ -475,8 +473,12 @@ func (g *gcePDExternalCSIDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (g *gcePDExternalCSIDriver) CreateDriver() {
-}
+func (g *gcePDExternalCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	framework.SkipIfMultizone(f.ClientSet)
 
-func (g *gcePDExternalCSIDriver) CleanupDriver() {
+	return &testsuites.PerTestConfig{
+		Driver:    g,
+		Prefix:    "gcepdext",
+		Framework: f,
+	}, func() {}
 }
@@ -82,7 +82,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &nfsDriver{}
|
|||||||
var _ testsuites.DynamicPVTestDriver = &nfsDriver{}
|
var _ testsuites.DynamicPVTestDriver = &nfsDriver{}
|
||||||
|
|
||||||
// InitNFSDriver returns nfsDriver that implements TestDriver interface
|
// InitNFSDriver returns nfsDriver that implements TestDriver interface
|
||||||
func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
|
func InitNFSDriver() testsuites.TestDriver {
|
||||||
return &nfsDriver{
|
return &nfsDriver{
|
||||||
driverInfo: testsuites.DriverInfo{
|
driverInfo: testsuites.DriverInfo{
|
||||||
Name: "nfs",
|
Name: "nfs",
|
||||||
@@ -96,8 +96,6 @@ func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
|
|||||||
testsuites.CapPersistence: true,
|
testsuites.CapPersistence: true,
|
||||||
testsuites.CapExec: true,
|
testsuites.CapExec: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
Config: config,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -133,10 +131,10 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
|
func (n *nfsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
|
||||||
provisioner := n.externalPluginName
|
provisioner := n.externalPluginName
|
||||||
parameters := map[string]string{"mountOptions": "vers=4.1"}
|
parameters := map[string]string{"mountOptions": "vers=4.1"}
|
||||||
ns := n.driverInfo.Config.Framework.Namespace.Name
|
ns := config.Framework.Namespace.Name
|
||||||
suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name)
|
suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name)
|
||||||
|
|
||||||
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
||||||
@@ -146,8 +144,7 @@ func (n *nfsDriver) GetClaimSize() string {
|
|||||||
return "5Gi"
|
return "5Gi"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *nfsDriver) CreateDriver() {
|
func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
|
||||||
f := n.driverInfo.Config.Framework
|
|
||||||
cs := f.ClientSet
|
cs := f.ClientSet
|
||||||
ns := f.Namespace
|
ns := f.Namespace
|
||||||
n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
|
n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
|
||||||
@@ -164,32 +161,32 @@ func (n *nfsDriver) CreateDriver() {
|
|||||||
|
|
||||||
By("creating an external dynamic provisioner pod")
|
By("creating an external dynamic provisioner pod")
|
||||||
n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
|
n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
|
||||||
|
|
||||||
|
return &testsuites.PerTestConfig{
|
||||||
|
Driver: n,
|
||||||
|
Prefix: "nfs",
|
||||||
|
Framework: f,
|
||||||
|
}, func() {
|
||||||
|
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
|
||||||
|
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
|
||||||
|
cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *nfsDriver) CleanupDriver() {
|
func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
|
||||||
f := n.driverInfo.Config.Framework
|
f := config.Framework
|
||||||
cs := f.ClientSet
|
|
||||||
ns := f.Namespace
|
|
||||||
|
|
||||||
framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
|
|
||||||
clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
|
|
||||||
cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
|
|
||||||
f := n.driverInfo.Config.Framework
|
|
||||||
cs := f.ClientSet
|
cs := f.ClientSet
|
||||||
ns := f.Namespace
|
ns := f.Namespace
|
||||||
|
|
||||||
// NewNFSServer creates a pod for InlineVolume and PreprovisionedPV,
|
// NewNFSServer creates a pod for InlineVolume and PreprovisionedPV,
|
||||||
// and startExternalProvisioner creates a pods for DynamicPV.
|
// and startExternalProvisioner creates a pods for DynamicPV.
|
||||||
// Therefore, we need a different CreateDriver logic for volType.
|
// Therefore, we need a different PrepareTest logic for volType.
|
||||||
switch volType {
|
switch volType {
|
||||||
case testpatterns.InlineVolume:
|
case testpatterns.InlineVolume:
|
||||||
fallthrough
|
fallthrough
|
||||||
case testpatterns.PreprovisionedPV:
|
case testpatterns.PreprovisionedPV:
|
||||||
config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{})
|
c, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{})
|
||||||
n.driverInfo.Config.ServerConfig = &config
|
config.ServerConfig = &c
|
||||||
return &nfsVolume{
|
return &nfsVolume{
|
||||||
serverIP: serverIP,
|
serverIP: serverIP,
|
||||||
serverPod: serverPod,
|
serverPod: serverPod,
|
||||||
@@ -224,7 +221,7 @@ var _ testsuites.InlineVolumeTestDriver = &glusterFSDriver{}
|
|||||||
var _ testsuites.PreprovisionedPVTestDriver = &glusterFSDriver{}
|
var _ testsuites.PreprovisionedPVTestDriver = &glusterFSDriver{}
|
||||||
|
|
||||||
// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
|
// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface
|
||||||
func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
|
func InitGlusterFSDriver() testsuites.TestDriver {
|
||||||
return &glusterFSDriver{
|
return &glusterFSDriver{
|
||||||
driverInfo: testsuites.DriverInfo{
|
driverInfo: testsuites.DriverInfo{
|
||||||
Name: "gluster",
|
Name: "gluster",
|
||||||
@@ -236,8 +233,6 @@ func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
|
|||||||
testsuites.CapPersistence: true,
|
testsuites.CapPersistence: true,
|
||||||
testsuites.CapExec: true,
|
testsuites.CapExec: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
Config: config,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -280,19 +275,21 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *glusterFSDriver) CreateDriver() {
|
func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
|
||||||
|
return &testsuites.PerTestConfig{
|
||||||
|
Driver: g,
|
||||||
|
Prefix: "gluster",
|
||||||
|
Framework: f,
|
||||||
|
}, func() {}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (g *glusterFSDriver) CleanupDriver() {
|
func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
|
||||||
}
|
f := config.Framework
|
||||||
|
|
||||||
func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
|
|
||||||
f := g.driverInfo.Config.Framework
|
|
||||||
cs := f.ClientSet
|
cs := f.ClientSet
|
||||||
ns := f.Namespace
|
ns := f.Namespace
|
||||||
|
|
||||||
config, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name)
|
c, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name)
|
||||||
g.driverInfo.Config.ServerConfig = &config
|
config.ServerConfig = &c
|
||||||
return &glusterVolume{
|
return &glusterVolume{
|
||||||
prefix: config.Prefix,
|
prefix: config.Prefix,
|
||||||
serverPod: serverPod,
|
serverPod: serverPod,
|
||||||
@@ -339,7 +336,7 @@ var _ testsuites.InlineVolumeTestDriver = &iSCSIDriver{}
|
|||||||
var _ testsuites.PreprovisionedPVTestDriver = &iSCSIDriver{}
|
var _ testsuites.PreprovisionedPVTestDriver = &iSCSIDriver{}
|
||||||
|
|
||||||
// InitISCSIDriver returns iSCSIDriver that implements TestDriver interface
|
// InitISCSIDriver returns iSCSIDriver that implements TestDriver interface
|
||||||
func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
|
func InitISCSIDriver() testsuites.TestDriver {
|
||||||
return &iSCSIDriver{
|
return &iSCSIDriver{
|
||||||
driverInfo: testsuites.DriverInfo{
|
driverInfo: testsuites.DriverInfo{
|
||||||
Name: "iscsi",
|
Name: "iscsi",
|
||||||
@@ -358,8 +355,6 @@ func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
|
|||||||
testsuites.CapBlock: true,
|
testsuites.CapBlock: true,
|
||||||
testsuites.CapExec: true,
|
testsuites.CapExec: true,
|
||||||
},
|
},
|
||||||
|
|
||||||
Config: config,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -408,19 +403,21 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
|
|||||||
return &pvSource, nil
|
return &pvSource, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *iSCSIDriver) CreateDriver() {
|
func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
|
||||||
|
return &testsuites.PerTestConfig{
|
||||||
|
Driver: i,
|
||||||
|
Prefix: "iscsi",
|
||||||
|
Framework: f,
|
||||||
|
}, func() {}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (i *iSCSIDriver) CleanupDriver() {
|
func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
|
||||||
}
|
f := config.Framework
|
||||||
|
|
||||||
func (i *iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
|
|
||||||
f := i.driverInfo.Config.Framework
|
|
||||||
cs := f.ClientSet
|
cs := f.ClientSet
|
||||||
ns := f.Namespace
|
ns := f.Namespace
|
||||||
|
|
||||||
config, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name)
|
c, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name)
|
||||||
i.driverInfo.Config.ServerConfig = &config
|
config.ServerConfig = &c
|
||||||
return &iSCSIVolume{
|
return &iSCSIVolume{
|
||||||
serverPod: serverPod,
|
serverPod: serverPod,
|
||||||
serverIP: serverIP,
|
serverIP: serverIP,
|
||||||
@@ -450,7 +447,7 @@ var _ testsuites.InlineVolumeTestDriver = &rbdDriver{}
 var _ testsuites.PreprovisionedPVTestDriver = &rbdDriver{}
 
 // InitRbdDriver returns rbdDriver that implements TestDriver interface
-func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitRbdDriver() testsuites.TestDriver {
 	return &rbdDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "rbd",
@@ -469,8 +466,6 @@ func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock:       true,
 				testsuites.CapExec:        true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -505,12 +500,12 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui
 }
 
 func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	f := r.driverInfo.Config.Framework
-	ns := f.Namespace
-
 	rv, ok := volume.(*rbdVolume)
 	Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume")
 
+	f := rv.f
+	ns := f.Namespace
+
 	pvSource := v1.PersistentVolumeSource{
 		RBD: &v1.RBDPersistentVolumeSource{
 			CephMonitors: []string{rv.serverIP},
@@ -530,19 +525,21 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu
 	return &pvSource, nil
 }
 
-func (r *rbdDriver) CreateDriver() {
+func (r *rbdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    r,
+		Prefix:    "rbd",
+		Framework: f,
+	}, func() {}
 }
 
-func (r *rbdDriver) CleanupDriver() {
-}
-
-func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := r.driverInfo.Config.Framework
+func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	cs := f.ClientSet
 	ns := f.Namespace
 
-	config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
-	r.driverInfo.Config.ServerConfig = &config
+	c, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
+	config.ServerConfig = &c
 	return &rbdVolume{
 		serverPod: serverPod,
 		serverIP:  serverIP,
@@ -577,7 +574,7 @@ var _ testsuites.InlineVolumeTestDriver = &cephFSDriver{}
 var _ testsuites.PreprovisionedPVTestDriver = &cephFSDriver{}
 
 // InitCephFSDriver returns cephFSDriver that implements TestDriver interface
-func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitCephFSDriver() testsuites.TestDriver {
 	return &cephFSDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "ceph",
@@ -590,8 +587,6 @@ func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapPersistence: true,
 				testsuites.CapExec:        true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -620,12 +615,11 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test
 }
 
 func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
-	f := c.driverInfo.Config.Framework
-	ns := f.Namespace
-
 	cv, ok := volume.(*cephVolume)
 	Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume")
 
+	ns := cv.f.Namespace
+
 	return &v1.PersistentVolumeSource{
 		CephFS: &v1.CephFSPersistentVolumeSource{
 			Monitors: []string{cv.serverIP + ":6789"},
@@ -639,19 +633,21 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, v
 	}, nil
 }
 
-func (c *cephFSDriver) CreateDriver() {
+func (c *cephFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    c,
+		Prefix:    "cephfs",
+		Framework: f,
+	}, func() {}
 }
 
-func (c *cephFSDriver) CleanupDriver() {
-}
-
-func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := c.driverInfo.Config.Framework
+func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	cs := f.ClientSet
 	ns := f.Namespace
 
-	config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
-	c.driverInfo.Config.ServerConfig = &config
+	cfg, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name)
+	config.ServerConfig = &cfg
 	return &cephVolume{
 		serverPod: serverPod,
 		serverIP:  serverIP,
@@ -676,7 +672,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathDriver{}
 var _ testsuites.InlineVolumeTestDriver = &hostPathDriver{}
 
 // InitHostPathDriver returns hostPathDriver that implements TestDriver interface
-func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitHostPathDriver() testsuites.TestDriver {
 	return &hostPathDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "hostPath",
@@ -687,8 +683,6 @@ func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver {
 			Capabilities: map[testsuites.Capability]bool{
 				testsuites.CapPersistence: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -712,20 +706,22 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume te
 	}
 }
 
-func (h *hostPathDriver) CreateDriver() {
+func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    h,
+		Prefix:    "hostpath",
+		Framework: f,
+	}, func() {}
 }
 
-func (h *hostPathDriver) CleanupDriver() {
-}
-
-func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := h.driverInfo.Config.Framework
+func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	cs := f.ClientSet
 
 	// pods should be scheduled on the node
 	nodes := framework.GetReadySchedulableNodesOrDie(cs)
 	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	h.driverInfo.Config.ClientNodeName = node.Name
+	config.ClientNodeName = node.Name
 	return nil
 }
 
@@ -748,7 +744,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{}
 var _ testsuites.InlineVolumeTestDriver = &hostPathSymlinkDriver{}
 
 // InitHostPathSymlinkDriver returns hostPathSymlinkDriver that implements TestDriver interface
-func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitHostPathSymlinkDriver() testsuites.TestDriver {
 	return &hostPathSymlinkDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "hostPathSymlink",
@@ -759,8 +755,6 @@ func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriv
 			Capabilities: map[testsuites.Capability]bool{
 				testsuites.CapPersistence: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -787,14 +781,16 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, vo
 	}
 }
 
-func (h *hostPathSymlinkDriver) CreateDriver() {
+func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    h,
+		Prefix:    "hostpathsymlink",
+		Framework: f,
+	}, func() {}
 }
 
-func (h *hostPathSymlinkDriver) CleanupDriver() {
-}
-
-func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := h.driverInfo.Config.Framework
+func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	cs := f.ClientSet
 
 	sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name)
@@ -804,7 +800,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) t
 	// pods should be scheduled on the node
 	nodes := framework.GetReadySchedulableNodesOrDie(cs)
 	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	h.driverInfo.Config.ClientNodeName = node.Name
+	config.ClientNodeName = node.Name
 
 	cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
 	privileged := true
@@ -888,7 +884,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &emptydirDriver{}
 var _ testsuites.InlineVolumeTestDriver = &emptydirDriver{}
 
 // InitEmptydirDriver returns emptydirDriver that implements TestDriver interface
-func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitEmptydirDriver() testsuites.TestDriver {
 	return &emptydirDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "emptydir",
@@ -899,8 +895,6 @@ func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver {
 			Capabilities: map[testsuites.Capability]bool{
 				testsuites.CapExec: true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -922,14 +916,16 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume te
 	}
 }
 
-func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
+func (e *emptydirDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	return nil
 }
 
-func (e *emptydirDriver) CreateDriver() {
-}
-
-func (e *emptydirDriver) CleanupDriver() {
+func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    e,
+		Prefix:    "emptydir",
+		Framework: f,
+	}, func() {}
 }
 
 // Cinder
@@ -953,7 +949,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{}
 var _ testsuites.DynamicPVTestDriver = &cinderDriver{}
 
 // InitCinderDriver returns cinderDriver that implements TestDriver interface
-func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitCinderDriver() testsuites.TestDriver {
 	return &cinderDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "cinder",
@@ -967,8 +963,6 @@ func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapFsGroup: true,
 				testsuites.CapExec:    true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1013,13 +1007,13 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, v
 	return &pvSource, nil
 }
 
-func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/cinder"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := c.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1029,14 +1023,16 @@ func (c *cinderDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (c *cinderDriver) CreateDriver() {
+func (c *cinderDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    c,
+		Prefix:    "cinder",
+		Framework: f,
+	}, func() {}
 }
 
-func (c *cinderDriver) CleanupDriver() {
-}
-
-func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := c.driverInfo.Config.Framework
+func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	ns := f.Namespace
 
 	// We assume that namespace.Name is a random string
@@ -1109,7 +1105,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{}
 var _ testsuites.DynamicPVTestDriver = &gcePdDriver{}
 
 // InitGceDriver returns gcePdDriver that implements TestDriver interface
-func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitGcePdDriver() testsuites.TestDriver {
 	return &gcePdDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "gcepd",
@@ -1128,8 +1124,6 @@ func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock:       true,
 				testsuites.CapExec:        true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1172,13 +1166,13 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
 	return &pvSource, nil
 }
 
-func (g *gcePdDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/gce-pd"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := g.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1188,17 +1182,19 @@ func (h *gcePdDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (g *gcePdDriver) CreateDriver() {
+func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    g,
+		Prefix:    "gcepd",
+		Framework: f,
+	}, func() {}
 }
 
-func (g *gcePdDriver) CleanupDriver() {
-}
-
-func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
+func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	if volType == testpatterns.InlineVolume {
 		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
 		// so pods should be also scheduled there.
-		g.driverInfo.Config.ClientNodeSelector = map[string]string{
+		config.ClientNodeSelector = map[string]string{
 			v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone,
 		}
 	}
@@ -1231,7 +1227,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{}
 var _ testsuites.DynamicPVTestDriver = &vSphereDriver{}
 
 // InitVSphereDriver returns vSphereDriver that implements TestDriver interface
-func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitVSphereDriver() testsuites.TestDriver {
 	return &vSphereDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "vSphere",
@@ -1245,8 +1241,6 @@ func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapFsGroup: true,
 				testsuites.CapExec:    true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1298,13 +1292,13 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
 	return &pvSource, nil
 }
 
-func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/vsphere-volume"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := v.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1314,14 +1308,16 @@ func (v *vSphereDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (v *vSphereDriver) CreateDriver() {
+func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    v,
+		Prefix:    "vsphere",
+		Framework: f,
+	}, func() {}
 }
 
-func (v *vSphereDriver) CleanupDriver() {
-}
-
-func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := v.driverInfo.Config.Framework
+func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	vspheretest.Bootstrap(f)
 	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
 	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
@@ -1352,7 +1348,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &azureDriver{}
 var _ testsuites.DynamicPVTestDriver = &azureDriver{}
 
 // InitAzureDriver returns azureDriver that implements TestDriver interface
-func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitAzureDriver() testsuites.TestDriver {
 	return &azureDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "azure",
@@ -1367,8 +1363,6 @@ func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock: true,
 				testsuites.CapExec:  true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1419,13 +1413,13 @@ func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo
 	return &pvSource, nil
 }
 
-func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (a *azureDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/azure-disk"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := a.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1435,13 +1429,15 @@ func (a *azureDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (a *azureDriver) CreateDriver() {
+func (a *azureDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    a,
+		Prefix:    "azure",
+		Framework: f,
+	}, func() {}
 }
 
-func (a *azureDriver) CleanupDriver() {
-}
-
-func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
+func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	By("creating a test azure disk volume")
 	volumeName, err := framework.CreatePDWithRetry()
 	Expect(err).NotTo(HaveOccurred())
@@ -1470,7 +1466,7 @@ var _ testsuites.TestDriver = &awsDriver{}
 var _ testsuites.DynamicPVTestDriver = &awsDriver{}
 
 // InitAwsDriver returns awsDriver that implements TestDriver interface
-func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver {
+func InitAwsDriver() testsuites.TestDriver {
 	return &awsDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name:        "aws",
@@ -1486,8 +1482,6 @@ func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver {
 				testsuites.CapBlock: true,
 				testsuites.CapExec:  true,
 			},
-
-			Config: config,
 		},
 	}
 }
@@ -1529,13 +1523,13 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu
 }
 */
 
-func (a *awsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass {
+func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
 	provisioner := "kubernetes.io/aws-ebs"
 	parameters := map[string]string{}
 	if fsType != "" {
 		parameters["fsType"] = fsType
 	}
-	ns := a.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -1545,15 +1539,17 @@ func (a *awsDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (a *awsDriver) CreateDriver() {
-}
-
-func (a *awsDriver) CleanupDriver() {
+func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+	return &testsuites.PerTestConfig{
+		Driver:    a,
+		Prefix:    "aws",
+		Framework: f,
+	}, func() {}
 }
 
 // TODO: Fix authorization error in attach operation and uncomment below
 /*
-func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
+func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	By("creating a test aws volume")
 	var err error
 	a.volumeName, err = framework.CreatePDWithRetry()
@@ -1617,7 +1613,7 @@ var _ testsuites.TestDriver = &localDriver{}
 var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{}
 var _ testsuites.PreprovisionedPVTestDriver = &localDriver{}
 
-func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config testsuites.TestConfig) testsuites.TestDriver {
+func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() testsuites.TestDriver {
 	maxFileSize := defaultLocalVolumeMaxFileSize
 	if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {
 		maxFileSize = maxFileSizeByVolType
@@ -1630,8 +1626,7 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config
 	if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok {
 		capabilities = capabilitiesByType
 	}
-	return func(config testsuites.TestConfig) testsuites.TestDriver {
-		hostExec := utils.NewHostExec(config.Framework)
+	return func() testsuites.TestDriver {
 		// custom tag to distinguish from tests of other volume types
 		featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType)
 		// For GCE Local SSD volumes, we must run serially
@@ -1645,11 +1640,8 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config
 				MaxFileSize:     maxFileSize,
 				SupportedFsType: supportedFsTypes,
 				Capabilities:    capabilities,
-				Config:          config,
 			},
-			hostExec:   hostExec,
 			volumeType: volumeType,
-			ltrMgr:     utils.NewLocalResourceManager("local-driver", hostExec, "/tmp"),
 		}
 	}
 }
@@ -1673,28 +1665,30 @@ func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	}
 }
 
-func (l *localDriver) CreateDriver() {
+func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	// choose a randome node to test against
-	l.node = l.randomNode()
-}
-
-func (l *localDriver) CleanupDriver() {
-	l.hostExec.Cleanup()
-}
-
-func (l *localDriver) randomNode() *v1.Node {
-	f := l.driverInfo.Config.Framework
 	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	return &node
+	l.node = &nodes.Items[rand.Intn(len(nodes.Items))]
+
+	l.hostExec = utils.NewHostExec(f)
+	l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")
+
+	return &testsuites.PerTestConfig{
+		Driver:         l,
+		Prefix:         "local",
+		Framework:      f,
+		ClientNodeName: l.node.Name,
+	}, func() {
+		l.hostExec.Cleanup()
+	}
 }
 
-func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
+func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	switch volType {
 	case testpatterns.PreprovisionedPV:
 		node := l.node
 		// assign this to schedule pod on this node
-		l.driverInfo.Config.ClientNodeName = node.Name
+		config.ClientNodeName = node.Name
 		return &localVolume{
 			ltrMgr: l.ltrMgr,
 			ltr:    l.ltrMgr.Create(node, l.volumeType, nil),
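Note: the local driver is the one driver in this file with real per-test state, and it shows the intended shape for stateful drivers: actual setup in PrepareTest and a cleanup closure that undoes it. The bare pattern in isolation, assuming a hypothetical statefulDriver with a hostExec field:

	func (d *statefulDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
		// Per-test helper, created here ...
		d.hostExec = utils.NewHostExec(f)
		return &testsuites.PerTestConfig{
				Driver:    d,
				Prefix:    "stateful",
				Framework: f,
			}, func() {
				// ... and torn down via the returned cleanup callback.
				d.hostExec.Cleanup()
			}
	}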
@@ -18,7 +18,6 @@ package storage
 
 import (
 	. "github.com/onsi/ginkgo"
-	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -26,7 +25,7 @@ import (
 )
 
 // List of testDrivers to be executed in below loop
-var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
+var testDrivers = []func() testsuites.TestDriver{
 	drivers.InitNFSDriver,
 	drivers.InitGlusterFSDriver,
 	drivers.InitISCSIDriver,
@@ -65,35 +64,11 @@ func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestP
 
 // This executes testSuites for in-tree volumes.
 var _ = utils.SIGDescribe("In-tree Volumes", func() {
-	f := framework.NewDefaultFramework("volumes")
-
-	var (
-		// Common configuration options for all drivers.
-		config = testsuites.TestConfig{
-			Framework: f,
-			Prefix:    "in-tree",
-		}
-	)
-
 	for _, initDriver := range testDrivers {
-		curDriver := initDriver(config)
-		curConfig := curDriver.GetDriverInfo().Config
+		curDriver := initDriver()
+
 		Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-			BeforeEach(func() {
-				// Reset config. The driver might have modified its copy
-				// in a previous test.
-				curDriver.GetDriverInfo().Config = curConfig
-
-				// setupDriver
-				curDriver.CreateDriver()
-			})
-
-			AfterEach(func() {
-				// Cleanup driver
-				curDriver.CleanupDriver()
-			})
-
-			testsuites.RunTestSuite(f, curDriver, testSuites, intreeTunePattern)
+			testsuites.DefineTestSuite(curDriver, testSuites, intreeTunePattern)
 		})
 	}
 })
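Note: the Describe block above no longer creates a framework or a shared config; it only builds the Ginkgo tree. A driver that plugs into this list therefore reduces to a zero-argument constructor plus the per-test hooks, roughly like this illustrative skeleton (myDriver and its fields are hypothetical, cut down to the calls visible in this diff):

	type myDriver struct {
		driverInfo testsuites.DriverInfo
	}

	func InitMyDriver() testsuites.TestDriver {
		return &myDriver{driverInfo: testsuites.DriverInfo{Name: "my"}}
	}

	func (d *myDriver) GetDriverInfo() *testsuites.DriverInfo {
		return &d.driverInfo
	}

	func (d *myDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {}

	func (d *myDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
		return &testsuites.PerTestConfig{Driver: d, Prefix: "my", Framework: f}, func() {}
	}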
@@ -143,10 +143,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 	}
 
 	for _, test := range tests {
-		class := newStorageClass(test, ns, "" /* suffix */)
-		claim := newClaim(test, ns, "" /* suffix */)
-		claim.Spec.StorageClassName = &class.Name
-		testsuites.TestDynamicProvisioning(test, c, claim, class)
+		test.Client = c
+		test.Class = newStorageClass(test, ns, "" /* suffix */)
+		test.Claim = newClaim(test, ns, "" /* suffix */)
+		test.Claim.Spec.StorageClassName = &test.Class.Name
+		test.TestDynamicProvisioning()
 	}
 }
 
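Note: with Client, Claim, and Class now carried by StorageClassTest itself, the old free functions become methods on the filled-in struct. The migration pattern, condensed into one illustrative sketch (field values are placeholders):

	test := testsuites.StorageClassTest{
		Client:      c,
		Name:        "example",             // hypothetical
		Provisioner: "kubernetes.io/gce-pd", // hypothetical
		ClaimSize:   "1Gi",                 // hypothetical
	}
	test.Class = newStorageClass(test, ns, "" /* suffix */)
	test.Claim = newClaim(test, ns, "" /* suffix */)
	test.Claim.Spec.StorageClassName = &test.Class.Name
	pv := test.TestDynamicProvisioning()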
@@ -301,6 +302,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
 
 func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
 	test := testsuites.StorageClassTest{
+		Client:      c,
 		Name:        "Regional PD storage class with waitForFirstConsumer test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
 		Parameters: map[string]string{
@@ -312,14 +314,14 @@
 	}
 
 	suffix := "delayed-regional"
-	class := newStorageClass(test, ns, suffix)
+	test.Class = newStorageClass(test, ns, suffix)
 	var claims []*v1.PersistentVolumeClaim
 	for i := 0; i < pvcCount; i++ {
 		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
+		claim.Spec.StorageClassName = &test.Class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
@@ -345,17 +347,20 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
 	}
 
 	suffix := "topo-regional"
-	class := newStorageClass(test, ns, suffix)
+	test.Client = c
+	test.Class = newStorageClass(test, ns, suffix)
 	zones := getTwoRandomZones(c)
-	addAllowedTopologiesToStorageClass(c, class, zones)
-	claim := newClaim(test, ns, suffix)
-	claim.Spec.StorageClassName = &class.Name
-	pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+	addAllowedTopologiesToStorageClass(c, test.Class, zones)
+	test.Claim = newClaim(test, ns, suffix)
+	test.Claim.Spec.StorageClassName = &test.Class.Name
+
+	pv := test.TestDynamicProvisioning()
 	checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
 }
 
 func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
 	test := testsuites.StorageClassTest{
+		Client:      c,
 		Name:        "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
 		Provisioner: "kubernetes.io/gce-pd",
 		Parameters: map[string]string{
@@ -367,16 +372,16 @@
 	}
 
 	suffix := "topo-delayed-regional"
-	class := newStorageClass(test, ns, suffix)
+	test.Class = newStorageClass(test, ns, suffix)
 	topoZones := getTwoRandomZones(c)
-	addAllowedTopologiesToStorageClass(c, class, topoZones)
+	addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
 	var claims []*v1.PersistentVolumeClaim
 	for i := 0; i < pvcCount; i++ {
 		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
+		claim.Spec.StorageClassName = &test.Class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
@@ -17,7 +17,9 @@ limitations under the License.
 package testsuites
 
 import (
+	"context"
 	"fmt"
+	"regexp"
 	"time"
 
 	. "github.com/onsi/ginkgo"
@@ -32,6 +34,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/podlogs"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
 
@@ -39,10 +42,10 @@
 type TestSuite interface {
 	// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
 	getTestSuiteInfo() TestSuiteInfo
-	// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
-	skipUnsupportedTest(testpatterns.TestPattern, TestDriver)
-	// execTest executes test of the testpattern for the driver
-	execTest(TestDriver, testpatterns.TestPattern)
+	// defineTests defines tests of the testpattern for the driver.
+	// Called inside a Ginkgo context that reflects the current driver and test pattern,
+	// so the test suite can define tests directly with ginkgo.It.
+	defineTests(TestDriver, testpatterns.TestPattern)
 }
 
 // TestSuiteInfo represents a set of parameters for TestSuite
@@ -54,11 +57,8 @@ type TestSuiteInfo struct {
 
 // TestResource represents an interface for resources that is used by TestSuite
 type TestResource interface {
-	// setupResource sets up test resources to be used for the tests with the
-	// combination of TestDriver and TestPattern
-	setupResource(TestDriver, testpatterns.TestPattern)
-	// cleanupResource clean up the test resources created in SetupResource
-	cleanupResource(TestDriver, testpatterns.TestPattern)
+	// cleanupResource cleans up the test resources created when setting up the resource
+	cleanupResource()
 }
 
 func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
@@ -66,27 +66,36 @@ func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string {
 	return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag)
 }
 
-// RunTestSuite runs all testpatterns of all testSuites for a driver
-func RunTestSuite(f *framework.Framework, driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
+// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver
+func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) {
 	for _, testSuiteInit := range tsInits {
 		suite := testSuiteInit()
 		patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns)
 
 		for _, pattern := range patterns {
-			suite.execTest(driver, pattern)
+			p := pattern
+			Context(getTestNameStr(suite, p), func() {
+				BeforeEach(func() {
+					// Skip unsupported tests to avoid unnecessary resource initialization
+					skipUnsupportedTest(driver, p)
+				})
+				suite.defineTests(driver, p)
+			})
 		}
 	}
 }
 
-// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern
+// skipUnsupportedTest will skip tests if the combination of driver and testpattern
 // is not suitable to be tested.
 // Whether it needs to be skipped is checked by following steps:
 // 1. Check if Whether SnapshotType is supported by driver from its interface
 // 2. Check if Whether volType is supported by driver from its interface
 // 3. Check if fsType is supported
 // 4. Check with driver specific logic
-// 5. Check with testSuite specific logic
-func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpatterns.TestPattern) {
+//
+// Test suites can also skip tests inside their own defineTests function or in
+// individual tests.
+func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) {
 	dInfo := driver.GetDriverInfo()
 	var isSupported bool
 
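Note: DefineTestSuite now only registers Ginkgo Contexts; the skip check runs in a BeforeEach before any suite-level setup. A suite's defineTests is expected to create its own framework and use the driver's per-test hooks inside It, along these lines (condensed sketch with a hypothetical exampleTestSuite; compare the real provisioning suite at the end of this diff):

	func (s *exampleTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
		f := framework.NewDefaultFramework("example")

		It("should work", func() {
			config, testCleanup := driver.PrepareTest(f)
			defer testCleanup()

			resource := createGenericVolumeTestResource(driver, config, pattern)
			defer resource.cleanupResource()
			// ... assertions against resource.volSource, resource.pvc, resource.sc ...
		})
	}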
@@ -130,9 +139,6 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
 
 	// 4. Check with driver specific logic
 	driver.SkipUnsupportedTest(pattern)
-
-	// 5. Check with testSuite specific logic
-	suite.skipUnsupportedTest(pattern, driver)
 }
 
 // genericVolumeTestResource is a generic implementation of TestResource that wil be able to
@@ -141,6 +147,8 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern
 // Also, see subpath.go in the same directory for how to extend and use it.
 type genericVolumeTestResource struct {
 	driver    TestDriver
+	config    *PerTestConfig
+	pattern   testpatterns.TestPattern
 	volType   string
 	volSource *v1.VolumeSource
 	pvc       *v1.PersistentVolumeClaim
@@ -152,17 +160,20 @@ type genericVolumeTestResource struct {
 
 var _ TestResource = &genericVolumeTestResource{}
 
-// setupResource sets up genericVolumeTestResource
-func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	r.driver = driver
+func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern) *genericVolumeTestResource {
+	r := genericVolumeTestResource{
+		driver:  driver,
+		config:  config,
+		pattern: pattern,
+	}
 	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
+	f := config.Framework
 	cs := f.ClientSet
 	fsType := pattern.FsType
 	volType := pattern.VolType
 
 	// Create volume for pre-provisioned volume tests
-	r.volume = CreateVolume(driver, volType)
+	r.volume = CreateVolume(driver, config, volType)
 
 	switch volType {
 	case testpatterns.InlineVolume:
@@ -184,7 +195,7 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
 		framework.Logf("Creating resource for dynamic PV")
 		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
 			claimSize := dDriver.GetClaimSize()
-			r.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
+			r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType)
 
 			By("creating a StorageClass " + r.sc.Name)
 			var err error
@@ -204,13 +215,14 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes
 	if r.volSource == nil {
 		framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType)
 	}
+
+	return &r
 }
 
 // cleanupResource cleans up genericVolumeTestResource
-func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
-	volType := pattern.VolType
+func (r *genericVolumeTestResource) cleanupResource() {
+	f := r.config.Framework
+	volType := r.pattern.VolType
 
 	if r.pvc != nil || r.pv != nil {
 		switch volType {
@@ -356,7 +368,7 @@ func deleteStorageClass(cs clientset.Interface, className string) {
 // the testsuites package whereas framework.VolumeTestConfig is merely
 // an implementation detail. It contains fields that have no effect,
 // which makes it unsuitable for use in the testsuits public API.
-func convertTestConfig(in *TestConfig) framework.VolumeTestConfig {
+func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig {
 	if in.ServerConfig != nil {
 		return *in.ServerConfig
 	}
@@ -390,3 +402,42 @@ func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.U
 
 	return snapshot
 }
+
+// StartPodLogs begins capturing log output and events from current
+// and future pods running in the namespace of the framework. That
+// ends when the returned cleanup function is called.
+//
+// The output goes to log files (when using --report-dir, as in the
+// CI) or the output stream (otherwise).
+func StartPodLogs(f *framework.Framework) func() {
+	ctx, cancel := context.WithCancel(context.Background())
+	cs := f.ClientSet
+	ns := f.Namespace
+
+	to := podlogs.LogOutput{
+		StatusWriter: GinkgoWriter,
+	}
+	if framework.TestContext.ReportDir == "" {
+		to.LogWriter = GinkgoWriter
+	} else {
+		test := CurrentGinkgoTestDescription()
+		reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
+		// We end the prefix with a slash to ensure that all logs
+		// end up in a directory named after the current test.
+		//
+		// TODO: use a deeper directory hierarchy once gubernator
+		// supports that (https://github.com/kubernetes/test-infra/issues/10289).
+		to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
+			reg.ReplaceAllString(test.FullTestText, "_") + "/"
+	}
+	podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
+
+	// pod events are something that the framework already collects itself
+	// after a failed test. Logging them live is only useful for interactive
+	// debugging, not when we collect reports.
+	if framework.TestContext.ReportDir == "" {
+		podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter)
+	}
+
+	return cancel
+}
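Note: the returned function is the context's cancel, so the typical (illustrative) call site pairs it with defer or an AfterEach:

	podLogsCleanup := StartPodLogs(f)
	defer podLogsCleanup()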
@@ -37,13 +37,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
 }
 
 // CreateVolume creates volume for test unless dynamicPV test
-func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) TestVolume {
+func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
 	switch volType {
 	case testpatterns.InlineVolume:
 		fallthrough
 	case testpatterns.PreprovisionedPV:
 		if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
-			return pDriver.CreateVolume(volType)
+			return pDriver.CreateVolume(config, volType)
 		}
 	case testpatterns.DynamicPV:
 		// No need to create volume
@@ -103,8 +103,3 @@ func GetSnapshotClass(
 
 	return snapshotClass
 }
-
-// GetUniqueDriverName returns unique driver name that can be used parallelly in tests
-func GetUniqueDriverName(driver TestDriver) string {
-	return fmt.Sprintf("%s-%s", driver.GetDriverInfo().Name, driver.GetDriverInfo().Config.Framework.UniqueName)
-}
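With the per-test configuration split out of DriverInfo, volume creation needs that configuration passed explicitly instead of digging it out of the driver. A hedged sketch of the new call shape (variable names are illustrative, not taken from this diff):

    // config and testCleanup come from the driver's per-test setup.
    config, testCleanup := driver.PrepareTest(f)
    defer testCleanup()

    // The config now travels alongside the driver instead of hiding
    // inside driver.GetDriverInfo().
    volume := CreateVolume(driver, config, testpatterns.PreprovisionedPV)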
@@ -41,6 +41,9 @@ import (
 // StorageClassTest represents parameters to be used by provisioning tests.
 // Not all parameters are used by all tests.
 type StorageClassTest struct {
+	Client         clientset.Interface
+	Claim          *v1.PersistentVolumeClaim
+	Class          *storage.StorageClass
 	Name           string
 	CloudProviders []string
 	Provisioner    string
@@ -76,183 +79,156 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo {
 	return p.tsInfo
 }
 
-func (p *provisioningTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
+func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+	var (
+		dInfo       = driver.GetDriverInfo()
+		dDriver     DynamicPVTestDriver
+		config      *PerTestConfig
+		testCleanup func()
+		testCase    *StorageClassTest
+		cs          clientset.Interface
+		pvc         *v1.PersistentVolumeClaim
+		sc          *storage.StorageClass
+	)
 
-func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPattern) (provisioningTestResource, provisioningTestInput) {
-	// Setup test resource for driver and testpattern
-	resource := provisioningTestResource{}
-	resource.setupResource(driver, pattern)
-
-	input := provisioningTestInput{
-		testCase: StorageClassTest{
-			ClaimSize:    resource.claimSize,
-			ExpectedSize: resource.claimSize,
-		},
-		cs:       driver.GetDriverInfo().Config.Framework.ClientSet,
-		dc:       driver.GetDriverInfo().Config.Framework.DynamicClient,
-		pvc:      resource.pvc,
-		sc:       resource.sc,
-		vsc:      resource.vsc,
-		dInfo:    driver.GetDriverInfo(),
-		nodeName: driver.GetDriverInfo().Config.ClientNodeName,
-	}
-
-	return resource, input
-}
-
-func (p *provisioningTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-	Context(getTestNameStr(p, pattern), func() {
-		var (
-			resource     provisioningTestResource
-			input        provisioningTestInput
-			needsCleanup bool
-		)
-
-		BeforeEach(func() {
-			needsCleanup = false
-			// Skip unsupported tests to avoid unnecessary resource initialization
-			skipUnsupportedTest(p, driver, pattern)
-			needsCleanup = true
-
-			// Create test input
-			resource, input = createProvisioningTestInput(driver, pattern)
-		})
-
-		AfterEach(func() {
-			if needsCleanup {
-				resource.cleanupResource(driver, pattern)
-			}
-		})
-
-		// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
-		// to be a single struct and to be passed as a pointer.
-		// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
-		testProvisioning(&input)
-	})
-}
-
-type provisioningTestResource struct {
-	driver TestDriver
-
-	claimSize string
-	sc        *storage.StorageClass
-	pvc       *v1.PersistentVolumeClaim
-	// follow parameter is used to test provision volume from snapshot
-	vsc *unstructured.Unstructured
-}
-
-var _ TestResource = &provisioningTestResource{}
-
-func (p *provisioningTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	// Setup provisioningTest resource
-	switch pattern.VolType {
-	case testpatterns.DynamicPV:
-		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
-			p.sc = dDriver.GetDynamicProvisionStorageClass("")
-			if p.sc == nil {
-				framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
-			}
-			p.driver = driver
-			p.claimSize = dDriver.GetClaimSize()
-			p.pvc = getClaim(p.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
-			p.pvc.Spec.StorageClassName = &p.sc.Name
-			framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", p.sc, p.pvc)
-			if sDriver, ok := driver.(SnapshottableTestDriver); ok {
-				p.vsc = sDriver.GetSnapshotClass()
-			}
-		}
+	BeforeEach(func() {
+		// Check preconditions.
+		if pattern.VolType != testpatterns.DynamicPV {
+			framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType)
+		}
+		ok := false
+		dDriver, ok = driver.(DynamicPVTestDriver)
+		if !ok {
+			framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
+		}
+	})
+
+	// This intentionally comes after checking the preconditions because it
+	// registers its own BeforeEach which creates the namespace. Beware that it
+	// also registers an AfterEach which renders f unusable. Any code using
+	// f must run inside an It or Context callback.
+	f := framework.NewDefaultFramework("provisioning")
+
+	init := func() {
+		// Now do the more expensive test initialization.
+		config, testCleanup = driver.PrepareTest(f)
+		cs = config.Framework.ClientSet
+		claimSize := dDriver.GetClaimSize()
+		sc = dDriver.GetDynamicProvisionStorageClass(config, "")
+		if sc == nil {
+			framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
+		}
+		pvc = getClaim(claimSize, config.Framework.Namespace.Name)
+		pvc.Spec.StorageClassName = &sc.Name
+		framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", sc, pvc)
+		testCase = &StorageClassTest{
+			Client:       config.Framework.ClientSet,
+			Claim:        pvc,
+			Class:        sc,
+			ClaimSize:    claimSize,
+			ExpectedSize: claimSize,
 		}
-	default:
-		framework.Failf("Dynamic Provision test doesn't support: %s", pattern.VolType)
 	}
-}
 
-func (p *provisioningTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-}
-
-type provisioningTestInput struct {
-	testCase StorageClassTest
-	cs       clientset.Interface
-	dc       dynamic.Interface
-	pvc      *v1.PersistentVolumeClaim
-	sc       *storage.StorageClass
-	vsc      *unstructured.Unstructured
-	dInfo    *DriverInfo
-	nodeName string
-}
-
-func testProvisioning(input *provisioningTestInput) {
-	// common checker for most of the test cases below
-	pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-		PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+	cleanup := func() {
+		if testCleanup != nil {
+			testCleanup()
+			testCleanup = nil
+		}
 	}
 
 	It("should provision storage with defaults", func() {
-		input.testCase.PvCheck = pvcheck
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		init()
+		defer cleanup()
+
+		testCase.TestDynamicProvisioning()
 	})
 
 	It("should provision storage with mount options", func() {
-		if input.dInfo.SupportedMountOption == nil {
-			framework.Skipf("Driver %q does not define supported mount option - skipping", input.dInfo.Name)
+		if dInfo.SupportedMountOption == nil {
+			framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
 		}
 
-		input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List()
-		input.testCase.PvCheck = pvcheck
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		init()
+		defer cleanup()
+
+		testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
+		testCase.TestDynamicProvisioning()
 	})
 
 	It("should access volume from different nodes", func() {
+		init()
+		defer cleanup()
+
 		// The assumption is that if the test hasn't been
 		// locked onto a single node, then the driver is
 		// usable on all of them *and* supports accessing a volume
 		// from any node.
-		if input.nodeName != "" {
-			framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name)
+		if config.ClientNodeName != "" {
+			framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name)
 		}
 
 		// Ensure that we actually have more than one node.
-		nodes := framework.GetReadySchedulableNodesOrDie(input.cs)
+		nodes := framework.GetReadySchedulableNodesOrDie(cs)
 		if len(nodes.Items) <= 1 {
 			framework.Skipf("need more than one node - skipping")
 		}
-		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
-			PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName})
+		testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+			PVMultiNodeCheck(cs, claim, volume, NodeSelection{Name: config.ClientNodeName})
 		}
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		testCase.TestDynamicProvisioning()
 	})
 
 	It("should create and delete block persistent volumes", func() {
-		if !input.dInfo.Capabilities[CapBlock] {
-			framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name)
+		if !dInfo.Capabilities[CapBlock] {
+			framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
 		}
 
+		init()
+		defer cleanup()
+
 		block := v1.PersistentVolumeBlock
-		input.testCase.VolumeMode = &block
-		input.pvc.Spec.VolumeMode = &block
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		testCase.VolumeMode = &block
+		pvc.Spec.VolumeMode = &block
+		testCase.TestDynamicProvisioning()
 	})
 
 	It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
-		if !input.dInfo.Capabilities[CapDataSource] {
-			framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name)
+		if !dInfo.Capabilities[CapDataSource] {
+			framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
 		}
 
-		dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc)
+		sDriver, ok := driver.(SnapshottableTestDriver)
+		if !ok {
+			framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
+		}
+
+		init()
+		defer cleanup()
+
+		dc := config.Framework.DynamicClient
+		vsc := sDriver.GetSnapshotClass(config)
+		dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: config.ClientNodeName}, cs, dc, pvc, sc, vsc)
 		defer cleanupFunc()
 
-		input.pvc.Spec.DataSource = dataSource
-		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+		pvc.Spec.DataSource = dataSource
+		testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 			By("checking whether the created volume has the pre-populated data")
 			command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
-			RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName})
+			RunInPodWithVolume(cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: config.ClientNodeName})
 		}
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		testCase.TestDynamicProvisioning()
 	})
 
 	It("should allow concurrent writes on the same node", func() {
-		if !input.dInfo.Capabilities[CapMultiPODs] {
-			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
+		if !dInfo.Capabilities[CapMultiPODs] {
+			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
 		}
-		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
+
+		init()
+		defer cleanup()
+
+		testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
 			// We start two pods concurrently on the same node,
 			// using the same PVC. Both wait for other to create a
 			// file before returning. The pods are forced onto the
@@ -265,7 +241,7 @@ func testProvisioning(input *provisioningTestInput) {
 				defer GinkgoRecover()
 				defer wg.Done()
 				node := NodeSelection{
-					Name: input.nodeName,
+					Name: config.ClientNodeName,
 				}
 				if podName == secondPodName {
 					node.Affinity = &v1.Affinity{
@@ -283,18 +259,24 @@ func testProvisioning(input *provisioningTestInput) {
 					},
 				}
 			}
-			RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
+			RunInPodWithVolume(cs, claim.Namespace, claim.Name, podName, command, node)
 		}
 		go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
 		go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
 		wg.Wait()
 		}
-		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
+		testCase.TestDynamicProvisioning()
 	})
 }
 
-// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
-func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume {
+// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
+func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
+	client := t.Client
+	Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required")
+	claim := t.Claim
+	Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required")
+	class := t.Class
+
 	var err error
 	if class != nil {
 		Expect(*claim.Spec.StorageClassName).To(Equal(class.Name))
@@ -493,29 +475,29 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 	pod = nil
 }
 
-func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
-	pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable)
+func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) {
+	pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable)
 	if pvs == nil {
 		return nil, node
 	}
 	return pvs[0], node
 }
 
-func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
+func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
 	var err error
 	Expect(len(claims)).ToNot(Equal(0))
 	namespace := claims[0].Namespace
 
-	By("creating a storage class " + class.Name)
-	class, err = client.StorageV1().StorageClasses().Create(class)
+	By("creating a storage class " + t.Class.Name)
+	class, err := t.Client.StorageV1().StorageClasses().Create(t.Class)
 	Expect(err).NotTo(HaveOccurred())
-	defer deleteStorageClass(client, class.Name)
+	defer deleteStorageClass(t.Client, class.Name)
 
 	By("creating claims")
 	var claimNames []string
 	var createdClaims []*v1.PersistentVolumeClaim
 	for _, claim := range claims {
-		c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
+		c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
 		claimNames = append(claimNames, c.Name)
 		createdClaims = append(createdClaims, c)
 		Expect(err).NotTo(HaveOccurred())
@@ -523,7 +505,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
 	defer func() {
 		var errors map[string]error
 		for _, claim := range createdClaims {
-			err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace)
+			err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
 			if err != nil {
 				errors[claim.Name] = err
 			}
@@ -537,44 +519,44 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse
 
 	// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
 	By("checking the claims are in pending state")
-	err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
+	err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
 	Expect(err).To(HaveOccurred())
-	verifyPVCsPending(client, createdClaims)
+	verifyPVCsPending(t.Client, createdClaims)
 
 	By("creating a pod referring to the claims")
 	// Create a pod referring to the claim and wait for it to get to running
 	var pod *v1.Pod
 	if expectUnschedulable {
-		pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
+		pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
 	} else {
-		pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
+		pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
 	}
 	Expect(err).NotTo(HaveOccurred())
 	defer func() {
-		framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
-		framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
+		framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
+		framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
 	}()
 	if expectUnschedulable {
 		// Verify that no claims are provisioned.
-		verifyPVCsPending(client, createdClaims)
+		verifyPVCsPending(t.Client, createdClaims)
 		return nil, nil
 	}
 
 	// collect node details
-	node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
+	node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
 	Expect(err).NotTo(HaveOccurred())
 
 	By("re-checking the claims to see they binded")
 	var pvs []*v1.PersistentVolume
 	for _, claim := range createdClaims {
 		// Get new copy of the claim
-		claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
+		claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		// make sure claim did bind
-		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
 		Expect(err).NotTo(HaveOccurred())
 
-		pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
+		pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 		pvs = append(pvs, pv)
 	}
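With Client, Claim and Class stored in the struct, a StorageClassTest is now self-contained, which is why the helpers above turn into methods instead of free functions with long parameter lists. A sketch of the call-site change (variable names are illustrative):

    // Old API: dependencies passed next to the struct.
    //   pv := TestDynamicProvisioning(testCase, cs, pvc, sc)

    // New API: the struct carries its dependencies.
    testCase := StorageClassTest{
        Client:    cs,
        Claim:     pvc,
        Class:     sc,
        ClaimSize: claimSize,
    }
    pv := testCase.TestDynamicProvisioning()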
@@ -24,13 +24,10 @@ import (
 	. "github.com/onsi/gomega"
 
 	"k8s.io/api/core/v1"
-	storage "k8s.io/api/storage/v1"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/dynamic"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
@@ -48,13 +45,12 @@ var (
 )
 
 type SnapshotClassTest struct {
 	Name           string
 	CloudProviders []string
 	Snapshotter    string
 	Parameters     map[string]string
 	NodeName       string
 	NodeSelector   map[string]string // NodeSelector for the pod
-	SnapshotContentCheck func(snapshotContent *unstructured.Unstructured) error
 }
 
 type snapshottableTestSuite struct {
@@ -79,218 +75,129 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo {
 	return s.tsInfo
 }
 
-func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-	dInfo := driver.GetDriverInfo()
-	if !dInfo.Capabilities[CapDataSource] {
-		framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
-	}
-}
-
-func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) {
-	// Setup test resource for driver and testpattern
-	resource := snapshottableTestResource{}
-	resource.setupResource(driver, pattern)
-
-	dInfo := driver.GetDriverInfo()
-	input := snapshottableTestInput{
-		testCase: SnapshotClassTest{
-			NodeName: dInfo.Config.ClientNodeName,
-		},
-		cs:    dInfo.Config.Framework.ClientSet,
-		dc:    dInfo.Config.Framework.DynamicClient,
-		pvc:   resource.pvc,
-		sc:    resource.sc,
-		vsc:   resource.vsc,
-		dInfo: dInfo,
-	}
-
-	return resource, input
-}
-
-func (s *snapshottableTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-	Context(getTestNameStr(s, pattern), func() {
-		var (
-			resource     snapshottableTestResource
-			input        snapshottableTestInput
-			needsCleanup bool
-		)
-
-		BeforeEach(func() {
-			needsCleanup = false
-			// Skip unsupported tests to avoid unnecessary resource initialization
-			skipUnsupportedTest(s, driver, pattern)
-			needsCleanup = true
-
-			// Create test input
-			resource, input = createSnapshottableTestInput(driver, pattern)
-		})
-
-		AfterEach(func() {
-			if needsCleanup {
-				resource.cleanupResource(driver, pattern)
-			}
-		})
-
-		// Ginkgo's "Global Shared Behaviors" require arguments for a shared function
-		// to be a single struct and to be passed as a pointer.
-		// Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details.
-		testSnapshot(&input)
-	})
-}
-
-type snapshottableTestResource struct {
-	driver    TestDriver
-	claimSize string
-
-	sc  *storage.StorageClass
-	pvc *v1.PersistentVolumeClaim
-	// volume snapshot class
-	vsc *unstructured.Unstructured
-}
-
-var _ TestResource = &snapshottableTestResource{}
-
-func (s *snapshottableTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	// Setup snapshottableTest resource
-	switch pattern.SnapshotType {
-	case testpatterns.DynamicCreatedSnapshot:
-		if dDriver, ok := driver.(DynamicPVTestDriver); ok {
-			s.sc = dDriver.GetDynamicProvisionStorageClass("")
-			if s.sc == nil {
-				framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
-			}
-			s.driver = driver
-			s.claimSize = dDriver.GetClaimSize()
-			s.pvc = getClaim(s.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name)
-			s.pvc.Spec.StorageClassName = &s.sc.Name
-			framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", s.sc, s.pvc)
-
-			if sDriver, ok := driver.(SnapshottableTestDriver); ok {
-				s.vsc = sDriver.GetSnapshotClass()
-			}
-		}
-	default:
-		framework.Failf("Dynamic Snapshot test doesn't support: %s", pattern.SnapshotType)
-	}
-}
-
-func (s *snapshottableTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-}
-
-type snapshottableTestInput struct {
-	testCase SnapshotClassTest
-	cs       clientset.Interface
-	dc       dynamic.Interface
-	pvc      *v1.PersistentVolumeClaim
-	sc       *storage.StorageClass
-	// volume snapshot class
-	vsc   *unstructured.Unstructured
-	dInfo *DriverInfo
-}
-
-func testSnapshot(input *snapshottableTestInput) {
-	It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
-		TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc)
-	})
-}
-
-// TestCreateSnapshot tests dynamic creating snapshot with specified SnapshotClassTest and snapshotClass
-func TestCreateSnapshot(
-	t SnapshotClassTest,
-	client clientset.Interface,
-	dynamicClient dynamic.Interface,
-	claim *v1.PersistentVolumeClaim,
-	class *storage.StorageClass,
-	snapshotClass *unstructured.Unstructured,
-) *unstructured.Unstructured {
-	var err error
-	if class != nil {
-		By("creating a StorageClass " + class.Name)
-		class, err = client.StorageV1().StorageClasses().Create(class)
-		Expect(err).NotTo(HaveOccurred())
-		defer func() {
-			framework.Logf("deleting storage class %s", class.Name)
-			framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil))
-		}()
-	}
-
-	By("creating a claim")
-	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
-	Expect(err).NotTo(HaveOccurred())
-	defer func() {
-		framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
-		// typically this claim has already been deleted
-		err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
-		if err != nil && !apierrs.IsNotFound(err) {
-			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
-		}
-	}()
-	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
-	Expect(err).NotTo(HaveOccurred())
-
-	By("checking the claim")
-	// Get new copy of the claim
-	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-
-	// Get the bound PV
-	pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-
-	By("creating a SnapshotClass")
-	snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	defer func() {
-		framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName())
-		framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil))
-	}()
-
-	By("creating a snapshot")
-	snapshot := getSnapshot(claim.Name, claim.Namespace, snapshotClass.GetName())
-
-	snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
-	Expect(err).NotTo(HaveOccurred())
-	defer func() {
-		framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
-		// typically this snapshot has already been deleted
-		err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
-		if err != nil && !apierrs.IsNotFound(err) {
-			framework.Failf("Error deleting snapshot %q. Error: %v", claim.Name, err)
-		}
-	}()
-	err = WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
-	Expect(err).NotTo(HaveOccurred())
-
-	By("checking the snapshot")
-	// Get new copy of the snapshot
-	snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-
-	// Get the bound snapshotContent
-	snapshotSpec := snapshot.Object["spec"].(map[string]interface{})
-	snapshotContentName := snapshotSpec["snapshotContentName"].(string)
-	snapshotContent, err := dynamicClient.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
-	Expect(err).NotTo(HaveOccurred())
-
-	snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{})
-	volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})
-	persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{})
-
-	// Check SnapshotContent properties
-	By("checking the SnapshotContent")
-	Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(snapshotClass.GetName()))
-	Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName()))
-	Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace()))
-	Expect(persistentVolumeRef["name"]).To(Equal(pv.Name))
-
-	// Run the checker
-	if t.SnapshotContentCheck != nil {
-		err = t.SnapshotContentCheck(snapshotContent)
-		Expect(err).NotTo(HaveOccurred())
-	}
-
-	return snapshotContent
+func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+	var (
+		sDriver SnapshottableTestDriver
+		dDriver DynamicPVTestDriver
+	)
+
+	BeforeEach(func() {
+		// Check preconditions.
+		Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot))
+		dInfo := driver.GetDriverInfo()
+		ok := false
+		sDriver, ok = driver.(SnapshottableTestDriver)
+		if !dInfo.Capabilities[CapDataSource] || !ok {
+			framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
+		}
+		dDriver, ok = driver.(DynamicPVTestDriver)
+		if !ok {
+			framework.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name)
+		}
+	})
+
+	// This intentionally comes after checking the preconditions because it
+	// registers its own BeforeEach which creates the namespace. Beware that it
+	// also registers an AfterEach which renders f unusable. Any code using
+	// f must run inside an It or Context callback.
+	f := framework.NewDefaultFramework("snapshotting")
+
+	It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() {
+		cs := f.ClientSet
+		dc := f.DynamicClient
+
+		// Now do the more expensive test initialization.
+		config, testCleanup := driver.PrepareTest(f)
+		defer testCleanup()
+
+		vsc := sDriver.GetSnapshotClass(config)
+		class := dDriver.GetDynamicProvisionStorageClass(config, "")
+		if class == nil {
+			framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name)
+		}
+
+		claimSize := dDriver.GetClaimSize()
+		pvc := getClaim(claimSize, config.Framework.Namespace.Name)
+		pvc.Spec.StorageClassName = &class.Name
+		framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc)
+
+		By("creating a StorageClass " + class.Name)
+		class, err := cs.StorageV1().StorageClasses().Create(class)
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			framework.Logf("deleting storage class %s", class.Name)
+			framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil))
+		}()
+
+		By("creating a claim")
+		pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name)
+			// typically this claim has already been deleted
+			err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil)
+			if err != nil && !apierrs.IsNotFound(err) {
+				framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err)
+			}
+		}()
+		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("checking the claim")
+		// Get new copy of the claim
+		pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+
+		// Get the bound PV
+		pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+
+		By("creating a SnapshotClass")
+		vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			framework.Logf("deleting SnapshotClass %s", vsc.GetName())
+			framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil))
+		}()
+
+		By("creating a snapshot")
+		snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName())
+
+		snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{})
+		Expect(err).NotTo(HaveOccurred())
+		defer func() {
+			framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName())
+			// typically this snapshot has already been deleted
+			err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil)
+			if err != nil && !apierrs.IsNotFound(err) {
+				framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err)
+			}
+		}()
+		err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("checking the snapshot")
+		// Get new copy of the snapshot
+		snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+
+		// Get the bound snapshotContent
+		snapshotSpec := snapshot.Object["spec"].(map[string]interface{})
+		snapshotContentName := snapshotSpec["snapshotContentName"].(string)
+		snapshotContent, err := dc.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{})
+		Expect(err).NotTo(HaveOccurred())
+
+		snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{})
+		volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})
+		persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{})
+
+		// Check SnapshotContent properties
+		By("checking the SnapshotContent")
+		Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName()))
+		Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName()))
+		Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace()))
+		Expect(persistentVolumeRef["name"]).To(Equal(pv.Name))
+	})
 }
 
 // WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
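The snapshot suite above now follows the same skeleton as the other suites: static skips in a BeforeEach, the framework created at definition time, and all expensive work inside the It. A minimal sketch of that skeleton, independent of any concrete suite (the suite name and test body are illustrative):

    func (s *someTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
        BeforeEach(func() {
            // Cheap, static precondition checks; skip before anything is created.
        })

        // Registers its own BeforeEach/AfterEach, so it must come after the skips.
        f := framework.NewDefaultFramework("example")

        It("does something", func() {
            // Expensive per-test setup happens here.
            config, testCleanup := driver.PrepareTest(f)
            defer testCleanup()

            // ... test logic using config ...
        })
    }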
@@ -26,6 +26,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -71,345 +72,352 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
 	return s.tsInfo
 }
 
-func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
-
-func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput {
-	driver := resource.driver
-	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
-	subPath := f.Namespace.Name
-	subPathDir := filepath.Join(volumePath, subPath)
-
-	return subPathTestInput{
-		f:                 f,
-		subPathDir:        subPathDir,
-		filePathInSubpath: filepath.Join(volumePath, fileName),
-		filePathInVolume:  filepath.Join(subPathDir, fileName),
-		volType:           resource.volType,
-		pod:               resource.pod,
-		formatPod:         resource.formatPod,
-		volSource:         resource.genericVolumeTestResource.volSource,
-		roVol:             resource.roVolSource,
-	}
-}
-
-func (s *subPathTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-	Context(getTestNameStr(s, pattern), func() {
-		var (
-			resource     subPathTestResource
-			input        subPathTestInput
-			needsCleanup bool
-		)
-
-		BeforeEach(func() {
-			needsCleanup = false
-			// Skip unsupported tests to avoid unnecessary resource initialization
-			skipUnsupportedTest(s, driver, pattern)
-			needsCleanup = true
-
-			// Setup test resource for driver and testpattern
-			resource = subPathTestResource{}
-			resource.setupResource(driver, pattern)
-
-			// Create test input
-			input = createSubPathTestInput(pattern, resource)
-		})
-
-		AfterEach(func() {
-			if needsCleanup {
-				resource.cleanupResource(driver, pattern)
-			}
-		})
-
-		testSubPath(&input)
-	})
-}
-
-type subPathTestResource struct {
-	genericVolumeTestResource
-
-	roVolSource *v1.VolumeSource
-	pod         *v1.Pod
-	formatPod   *v1.Pod
-}
-
-var _ TestResource = &subPathTestResource{}
-
-func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	s.driver = driver
-	dInfo := s.driver.GetDriverInfo()
-	f := dInfo.Config.Framework
-	fsType := pattern.FsType
-	volType := pattern.VolType
-
-	// Setup generic test resource
-	s.genericVolumeTestResource.setupResource(driver, pattern)
-
-	// Setup subPath test dependent resource
-	switch volType {
-	case testpatterns.InlineVolume:
-		if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
-			s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.volume)
-		}
-	case testpatterns.PreprovisionedPV:
-		s.roVolSource = &v1.VolumeSource{
-			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-				ClaimName: s.genericVolumeTestResource.pvc.Name,
-				ReadOnly:  true,
-			},
-		}
-	case testpatterns.DynamicPV:
-		s.roVolSource = &v1.VolumeSource{
-			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-				ClaimName: s.genericVolumeTestResource.pvc.Name,
-				ReadOnly:  true,
-			},
-		}
-	default:
-		framework.Failf("SubPath test doesn't support: %s", volType)
-	}
-
-	subPath := f.Namespace.Name
-	config := dInfo.Config
-	s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true)
-	s.pod.Spec.NodeName = config.ClientNodeName
-	s.pod.Spec.NodeSelector = config.ClientNodeSelector
-
-	s.formatPod = volumeFormatPod(f, s.volSource)
-	s.formatPod.Spec.NodeName = config.ClientNodeName
-	s.formatPod.Spec.NodeSelector = config.ClientNodeSelector
-}
-
-func (s *subPathTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
+func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+	var (
+		config            *PerTestConfig
+		testCleanup       func()
+		cs                clientset.Interface
+		resource          *genericVolumeTestResource
+		roVolSource       *v1.VolumeSource
+		pod               *v1.Pod
+		formatPod         *v1.Pod
+		subPathDir        string
+		filePathInSubpath string
+		filePathInVolume  string
+	)
+
+	// No preconditions to test. Normally they would be in a BeforeEach here.
+
+	// This intentionally comes after checking the preconditions because it
+	// registers its own BeforeEach which creates the namespace. Beware that it
+	// also registers an AfterEach which renders f unusable. Any code using
+	// f must run inside an It or Context callback.
+	f := framework.NewDefaultFramework("provisioning")
+
+	init := func() {
+		cs = f.ClientSet // needed for cleanup, f.ClientSet itself gets reset too early
+
+		// Now do the more expensive test initialization.
+		config, testCleanup = driver.PrepareTest(f)
+		fsType := pattern.FsType
+		volType := pattern.VolType
+
+		resource = createGenericVolumeTestResource(driver, config, pattern)
+
+		// Setup subPath test dependent resource
+		roVolSource = nil
+		switch volType {
+		case testpatterns.InlineVolume:
+			if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
+				roVolSource = iDriver.GetVolumeSource(true, fsType, resource.volume)
+			}
+		case testpatterns.PreprovisionedPV:
+			roVolSource = &v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+					ClaimName: resource.pvc.Name,
+					ReadOnly:  true,
+				},
+			}
+		case testpatterns.DynamicPV:
+			roVolSource = &v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+					ClaimName: resource.pvc.Name,
+					ReadOnly:  true,
+				},
+			}
+		default:
+			framework.Failf("SubPath test doesn't support: %s", volType)
+		}
+
+		subPath := f.Namespace.Name
+		pod = SubpathTestPod(f, subPath, resource.volType, resource.volSource, true)
+		pod.Spec.NodeName = config.ClientNodeName
+		pod.Spec.NodeSelector = config.ClientNodeSelector
+
+		formatPod = volumeFormatPod(f, resource.volSource)
+		formatPod.Spec.NodeName = config.ClientNodeName
+		formatPod.Spec.NodeSelector = config.ClientNodeSelector
+
+		subPathDir = filepath.Join(volumePath, subPath)
+		filePathInSubpath = filepath.Join(volumePath, fileName)
+		filePathInVolume = filepath.Join(subPathDir, fileName)
+	}
+
+	cleanup := func() {
+		if pod != nil {
+			By("Deleting pod")
+			err := framework.DeletePodWithWait(f, cs, pod)
+			Expect(err).ToNot(HaveOccurred(), "while deleting pod")
+			pod = nil
+		}
+
+		if resource != nil {
+			resource.cleanupResource()
+			resource = nil
+		}
+
+		if testCleanup != nil {
+			testCleanup()
+			testCleanup = nil
+		}
+	}
 
-	// Cleanup subPath test dependent resource
-	By("Deleting pod")
-	err := framework.DeletePodWithWait(f, f.ClientSet, s.pod)
-	Expect(err).ToNot(HaveOccurred(), "while deleting pod")
-
-	// Cleanup generic test resource
-	s.genericVolumeTestResource.cleanupResource(driver, pattern)
-}
-
-type subPathTestInput struct {
-	f                 *framework.Framework
-	subPathDir        string
-	filePathInSubpath string
-	filePathInVolume  string
-	volType           string
-	pod               *v1.Pod
-	formatPod         *v1.Pod
-	volSource         *v1.VolumeSource
-	roVol             *v1.VolumeSource
-}
-
-func testSubPath(input *subPathTestInput) {
 	It("should support non-existent path", func() {
+		init()
+		defer cleanup()
+
 		// Write the file in the subPath from init container 1
-		setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
+		setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
 
 		// Read it from outside the subPath from container 1
-		testReadFile(input.f, input.filePathInVolume, input.pod, 1)
+		testReadFile(f, filePathInVolume, pod, 1)
 	})
 
 	It("should support existing directory", func() {
+		init()
+		defer cleanup()
+
 		// Create the directory
-		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
+		setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
 
 		// Write the file in the subPath from init container 1
-		setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
+		setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
 
 		// Read it from outside the subPath from container 1
-		testReadFile(input.f, input.filePathInVolume, input.pod, 1)
+		testReadFile(f, filePathInVolume, pod, 1)
 	})
 
 	It("should support existing single file", func() {
+		init()
+		defer cleanup()
+
 		// Create the file in the init container
-		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume))
+		setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume))
 
 		// Read it from inside the subPath from container 0
-		testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
+		testReadFile(f, filePathInSubpath, pod, 0)
 	})
 
 	It("should support file as subpath", func() {
-		// Create the file in the init container
-		setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir))
+		init()
+		defer cleanup()
 
-		TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod)
+		// Create the file in the init container
+		setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir))
+
+		TestBasicSubpath(f, f.Namespace.Name, pod)
 	})
 
 	It("should fail if subpath directory is outside the volume [Slow]", func() {
+		init()
+		defer cleanup()
+
 		// Create the subpath outside the volume
-		setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir))
+		setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(input.f, input.pod, false)
+		testPodFailSubpath(f, pod, false)
 	})
 
 	It("should fail if subpath file is outside the volume [Slow]", func() {
+		init()
+		defer cleanup()
+
 		// Create the subpath outside the volume
-		setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir))
+		setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(input.f, input.pod, false)
+		testPodFailSubpath(f, pod, false)
 	})
 
 	It("should fail if non-existent subpath is outside the volume [Slow]", func() {
+		init()
+		defer cleanup()
+
 		// Create the subpath outside the volume
-		setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir))
+		setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(input.f, input.pod, false)
+		testPodFailSubpath(f, pod, false)
 	})
 
 	It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
+		init()
+		defer cleanup()
+
 		// Create the subpath outside the volume
-		setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir))
+		setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(input.f, input.pod, false)
+		testPodFailSubpath(f, pod, false)
 	})
 
 	It("should support creating multiple subpath from same volumes [Slow]", func() {
+		init()
+		defer cleanup()
+
 		subpathDir1 := filepath.Join(volumePath, "subpath1")
 		subpathDir2 := filepath.Join(volumePath, "subpath2")
 		filepath1 := filepath.Join("/test-subpath1", fileName)
 		filepath2 := filepath.Join("/test-subpath2", fileName)
-		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
+		setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
 
-		addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
+		addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
 			Name:      volumeName,
 			MountPath: "/test-subpath1",
 			SubPath:   "subpath1",
 		})
-		addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{
+		addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
 			Name:      volumeName,
 			MountPath: "/test-subpath2",
 			SubPath:   "subpath2",
 		})
 
 		// Write the files from container 0 and instantly read them back
-		addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2)
-		testMultipleReads(input.f, input.pod, 0, filepath1, filepath2)
+		addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2)
+		testMultipleReads(f, pod, 0, filepath1, filepath2)
 	})
 
 	It("should support restarting containers using directory as subpath [Slow]", func() {
-		// Create the directory
-		setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath))
+		init()
+		defer cleanup()
|
|
||||||
testPodContainerRestart(input.f, input.pod)
|
// Create the directory
|
||||||
|
setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath))
|
||||||
|
|
||||||
|
testPodContainerRestart(f, pod)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should support restarting containers using file as subpath [Slow]", func() {
|
It("should support restarting containers using file as subpath [Slow]", func() {
|
||||||
// Create the file
|
init()
|
||||||
setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath))
|
defer cleanup()
|
||||||
|
|
||||||
testPodContainerRestart(input.f, input.pod)
|
// Create the file
|
||||||
|
setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath))
|
||||||
|
|
||||||
|
testPodContainerRestart(f, pod)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
|
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
|
||||||
testSubpathReconstruction(input.f, input.pod, false)
|
init()
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
testSubpathReconstruction(f, pod, false)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
|
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
|
||||||
if strings.HasPrefix(input.volType, "hostPath") || strings.HasPrefix(input.volType, "csi-hostpath") {
|
init()
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
|
if strings.HasPrefix(resource.volType, "hostPath") || strings.HasPrefix(resource.volType, "csi-hostpath") {
|
||||||
// TODO: This skip should be removed once #61446 is fixed
|
// TODO: This skip should be removed once #61446 is fixed
|
||||||
framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType)
|
framework.Skipf("%s volume type does not support reconstruction, skipping", resource.volType)
|
||||||
}
|
}
|
||||||
testSubpathReconstruction(input.f, input.pod, true)
|
|
||||||
|
testSubpathReconstruction(f, pod, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should support readOnly directory specified in the volumeMount", func() {
|
It("should support readOnly directory specified in the volumeMount", func() {
|
||||||
|
init()
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
// Create the directory
|
// Create the directory
|
||||||
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
|
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
|
||||||
|
|
||||||
// Write the file in the volume from init container 2
|
// Write the file in the volume from init container 2
|
||||||
setWriteCommand(input.filePathInVolume, &input.pod.Spec.InitContainers[2])
|
setWriteCommand(filePathInVolume, &pod.Spec.InitContainers[2])
|
||||||
|
|
||||||
// Read it from inside the subPath from container 0
|
// Read it from inside the subPath from container 0
|
||||||
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
|
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
|
||||||
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
|
testReadFile(f, filePathInSubpath, pod, 0)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should support readOnly file specified in the volumeMount", func() {
|
It("should support readOnly file specified in the volumeMount", func() {
|
||||||
|
init()
|
||||||
|
defer cleanup()
|
||||||
|
|
||||||
// Create the file
|
// Create the file
|
||||||
setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir))
|
setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir))
|
||||||
|
|
||||||
// Write the file in the volume from init container 2
|
// Write the file in the volume from init container 2
|
||||||
setWriteCommand(input.subPathDir, &input.pod.Spec.InitContainers[2])
|
setWriteCommand(subPathDir, &pod.Spec.InitContainers[2])
|
||||||
|
|
||||||
// Read it from inside the subPath from container 0
|
// Read it from inside the subPath from container 0
|
||||||
input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
|
pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
|
||||||
testReadFile(input.f, volumePath, input.pod, 0)
|
testReadFile(f, volumePath, pod, 0)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should support existing directories when readOnly specified in the volumeSource", func() {
|
It("should support existing directories when readOnly specified in the volumeSource", func() {
|
||||||
if input.roVol == nil {
|
init()
|
||||||
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
|
defer cleanup()
|
||||||
|
if roVolSource == nil {
|
||||||
|
framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
|
||||||
}
|
}
|
||||||
|
|
||||||
pod := input.pod.DeepCopy()
|
origpod := pod.DeepCopy()
|
||||||
|
|
||||||
// Create the directory
|
// Create the directory
|
||||||
setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir))
|
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
|
||||||
|
|
||||||
// Write the file in the subPath from init container 1
|
// Write the file in the subPath from init container 1
|
||||||
setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1])
|
setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
|
||||||
|
|
||||||
// Read it from inside the subPath from container 0
|
// Read it from inside the subPath from container 0
|
||||||
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
|
testReadFile(f, filePathInSubpath, pod, 0)
|
||||||
|
|
||||||
// Reset the pod
|
// Reset the pod
|
||||||
input.pod = pod
|
pod = origpod
|
||||||
|
|
||||||
// Set volume source to read only
|
// Set volume source to read only
|
||||||
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
|
pod.Spec.Volumes[0].VolumeSource = *roVolSource
|
||||||
|
|
||||||
// Read it from inside the subPath from container 0
|
// Read it from inside the subPath from container 0
|
||||||
testReadFile(input.f, input.filePathInSubpath, input.pod, 0)
|
testReadFile(f, filePathInSubpath, pod, 0)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should verify container cannot write to subpath readonly volumes", func() {
|
It("should verify container cannot write to subpath readonly volumes", func() {
|
||||||
if input.roVol == nil {
|
init()
|
||||||
framework.Skipf("Volume type %v doesn't support readOnly source", input.volType)
|
defer cleanup()
|
||||||
|
if roVolSource == nil {
|
||||||
|
framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Format the volume while it's writable
|
// Format the volume while it's writable
|
||||||
formatVolume(input.f, input.formatPod)
|
formatVolume(f, formatPod)
|
||||||
|
|
||||||
// Set volume source to read only
|
// Set volume source to read only
|
||||||
input.pod.Spec.Volumes[0].VolumeSource = *input.roVol
|
pod.Spec.Volumes[0].VolumeSource = *roVolSource
|
||||||
|
|
||||||
// Write the file in the volume from container 0
|
// Write the file in the volume from container 0
|
||||||
setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[0])
|
setWriteCommand(subPathDir, &pod.Spec.Containers[0])
|
||||||
|
|
||||||
// Pod should fail
|
// Pod should fail
|
||||||
testPodFailSubpath(input.f, input.pod, true)
|
testPodFailSubpath(f, pod, true)
|
||||||
})
|
})
|
||||||
|
|
||||||
It("should be able to unmount after the subpath directory is deleted", func() {
|
It("should be able to unmount after the subpath directory is deleted", func() {
|
||||||
// Change volume container to busybox so we can exec later
|
init()
|
||||||
input.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
|
defer cleanup()
|
||||||
input.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
|
|
||||||
|
|
||||||
By(fmt.Sprintf("Creating pod %s", input.pod.Name))
|
// Change volume container to busybox so we can exec later
|
||||||
removeUnusedContainers(input.pod)
|
pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
|
||||||
pod, err := input.f.ClientSet.CoreV1().Pods(input.f.Namespace.Name).Create(input.pod)
|
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
|
||||||
|
|
||||||
|
By(fmt.Sprintf("Creating pod %s", pod.Name))
|
||||||
|
removeUnusedContainers(pod)
|
||||||
|
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
|
||||||
Expect(err).ToNot(HaveOccurred(), "while creating pod")
|
Expect(err).ToNot(HaveOccurred(), "while creating pod")
|
||||||
defer func() {
|
defer func() {
|
||||||
By(fmt.Sprintf("Deleting pod %s", pod.Name))
|
By(fmt.Sprintf("Deleting pod %s", pod.Name))
|
||||||
framework.DeletePodWithWait(input.f, input.f.ClientSet, pod)
|
framework.DeletePodWithWait(f, f.ClientSet, pod)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Wait for pod to be running
|
// Wait for pod to be running
|
||||||
err = framework.WaitForPodRunningInNamespace(input.f.ClientSet, pod)
|
err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
|
||||||
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
|
Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
|
||||||
|
|
||||||
// Exec into container that mounted the volume, delete subpath directory
|
// Exec into container that mounted the volume, delete subpath directory
|
||||||
rmCmd := fmt.Sprintf("rm -rf %s", input.subPathDir)
|
rmCmd := fmt.Sprintf("rm -rf %s", subPathDir)
|
||||||
_, err = podContainerExec(pod, 1, rmCmd)
|
_, err = podContainerExec(pod, 1, rmCmd)
|
||||||
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
|
Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
|
||||||
|
|
||||||
|
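The conversions above all rely on one Ginkgo idiom: It bodies run at test execution time, so shared state can live in plain closure variables that init() populates lazily and cleanup() releases. A minimal sketch of the shape, assuming a hypothetical makePod helper (not from this commit):

    var pod *v1.Pod // set by init(), valid only inside an It body

    init := func() {
        // expensive per-test setup, runs only if the test was not skipped
        pod = makePod() // hypothetical helper
    }
    cleanup := func() {
        // must tolerate a partially completed init()
        pod = nil
    }

    It("uses the volume", func() {
        init()
        defer cleanup()
        // ... assertions using pod ...
    })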
@@ -25,17 +25,29 @@ import (
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
)

-// TestDriver represents an interface for a driver to be tested in TestSuite
+// TestDriver represents an interface for a driver to be tested in TestSuite.
+// Except for GetDriverInfo, all methods will be called at test runtime and thus
+// can use framework.Skipf, framework.Fatal, Gomega assertions, etc.
type TestDriver interface {
-// GetDriverInfo returns DriverInfo for the TestDriver
+// GetDriverInfo returns DriverInfo for the TestDriver. This must be static
+// information.
GetDriverInfo() *DriverInfo
-// CreateDriver creates all driver resources that is required for TestDriver method
-// except CreateVolume
-CreateDriver()
-// CreateDriver cleanup all the resources that is created in CreateDriver
-CleanupDriver()
-// SkipUnsupportedTest skips test in Testpattern is not suitable to test with the TestDriver
+// SkipUnsupportedTest skips the test if the Testpattern is not
+// suitable to test with the TestDriver. It gets called after
+// parsing parameters of the test suite and before the
+// framework is initialized. Cheap tests that just check
+// parameters like the cloud provider can and should be
+// done in SkipUnsupportedTest to avoid setting up more
+// expensive resources like framework.Framework. Tests that
+// depend on a connection to the cluster can be done in
+// PrepareTest once the framework is ready.
SkipUnsupportedTest(testpatterns.TestPattern)

+// PrepareTest is called at test execution time each time a new test case is about to start.
+// It sets up all necessary resources and returns the per-test configuration
+// plus a cleanup function that frees all allocated resources.
+PrepareTest(f *framework.Framework) (*PerTestConfig, func())
}

// TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume.
@@ -49,7 +61,7 @@ type TestVolume interface {
type PreprovisionedVolumeTestDriver interface {
TestDriver
// CreateVolume creates a pre-provisioned volume of the desired volume type.
-CreateVolume(volumeType testpatterns.TestVolType) TestVolume
+CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume
}

// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
@@ -68,7 +80,6 @@ type PreprovisionedPVTestDriver interface {
// GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for pre-provisioned Persistent Volume.
// It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them.
// It will return nil, if the TestDriver doesn't support either of the parameters.
-// Volume node affinity is optional, it will be nil for volumes which does not have volume node affinity.
GetPersistentVolumeSource(readOnly bool, fsType string, testVolume TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity)
}

@@ -78,7 +89,7 @@ type DynamicPVTestDriver interface {
// GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume.
// It will set fsType to the StorageClass, if TestDriver supports it.
// It will return nil, if the TestDriver doesn't support it.
-GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass
+GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass

// GetClaimSize returns the size of the volume that is to be provisioned ("5Gi", "1Mi").
// The size must be chosen so that the resulting volume is large enough for all
@@ -91,7 +102,7 @@ type SnapshottableTestDriver interface {
TestDriver
// GetSnapshotClass returns a SnapshotClass to create snapshot.
// It will return nil, if the TestDriver doesn't support it.
-GetSnapshotClass() *unstructured.Unstructured
+GetSnapshotClass(config *PerTestConfig) *unstructured.Unstructured
}

// Capability represents a feature that a volume plugin supports
@@ -112,7 +123,7 @@ const (
CapMultiPODs Capability = "multipods"
)

-// DriverInfo represents a combination of parameters to be used in implementation of TestDriver
+// DriverInfo represents static information about a TestDriver.
type DriverInfo struct {
Name string // Name of the driver
FeatureTag string // FeatureTag for the driver
@@ -122,14 +133,15 @@ type DriverInfo struct {
SupportedMountOption sets.String // Map of string for supported mount option
RequiredMountOption sets.String // Map of string for required mount option (Optional)
Capabilities map[Capability]bool // Map that represents plugin capabilities

-Config TestConfig // Test configuration for the current test.
}

-// TestConfig represents parameters that control test execution.
-// They can still be modified after defining tests, for example
-// in a BeforeEach or when creating the driver.
-type TestConfig struct {
+// PerTestConfig represents parameters that control test execution.
+// One instance gets allocated for each test and is then passed
+// via pointer to functions involved in the test.
+type PerTestConfig struct {
+// The test driver for the test.
+Driver TestDriver
+
// Some short word that gets inserted into dynamically
// generated entities (pods, paths) as first part of the name
// to make debugging easier. Can be the same for different
@@ -154,8 +166,9 @@ type TestConfig struct {
// the configuration that then has to be used to run tests.
// The values above are ignored for such tests.
ServerConfig *framework.VolumeTestConfig
+}
-
-// TopologyEnabled indicates that the Topology feature gate
-// should be enabled in external-provisioner
-TopologyEnabled bool
-}
+
+// GetUniqueDriverName returns a unique driver name that can be used in parallel tests
+func (config *PerTestConfig) GetUniqueDriverName() string {
+return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
+}
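To see what the new contract asks of a driver, here is a minimal sketch of an implementation. The hostPathDriver type and the Prefix field name are assumptions for illustration, not taken from this commit:

    type hostPathDriver struct {
        driverInfo DriverInfo
    }

    func (h *hostPathDriver) GetDriverInfo() *DriverInfo {
        return &h.driverInfo // static information only
    }

    func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
        // cheap, static checks only, no framework or cluster access here
        if pattern.VolType == testpatterns.DynamicPV {
            framework.Skipf("hostPath does not support dynamic provisioning -- skipping")
        }
    }

    func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*PerTestConfig, func()) {
        config := &PerTestConfig{
            Driver:    h,
            Prefix:    "hostpath", // assumed name of the "short word" field documented above
            Framework: f,
        }
        // nothing cluster-side to allocate, so the cleanup func is a no-op
        return config, func() {}
    }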
@@ -74,87 +74,59 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}

-func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
-
-func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
-var fsGroup *int64
-driver := resource.driver
-dInfo := driver.GetDriverInfo()
-f := dInfo.Config.Framework
-fileSizes := createFileSizes(dInfo.MaxFileSize)
-volSource := resource.volSource
-
-if volSource == nil {
-framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+var (
+dInfo = driver.GetDriverInfo()
+config *PerTestConfig
+testCleanup func()
+resource *genericVolumeTestResource
+)
+
+// No preconditions to test. Normally they would be in a BeforeEach here.
+
+// This intentionally comes after checking the preconditions because it
+// registers its own BeforeEach which creates the namespace. Beware that it
+// also registers an AfterEach which renders f unusable. Any code using
+// f must run inside an It or Context callback.
+f := framework.NewDefaultFramework("volumeio")
+
+init := func() {
+// Now do the more expensive test initialization.
+config, testCleanup = driver.PrepareTest(f)
+resource = createGenericVolumeTestResource(driver, config, pattern)
+if resource.volSource == nil {
+framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+}
}

-if dInfo.Capabilities[CapFsGroup] {
-fsGroupVal := int64(1234)
-fsGroup = &fsGroupVal
+cleanup := func() {
+if resource != nil {
+resource.cleanupResource()
+resource = nil
+}
+
+if testCleanup != nil {
+testCleanup()
+testCleanup = nil
+}
}

-return volumeIOTestInput{
-f: f,
-name: dInfo.Name,
-config: &dInfo.Config,
-volSource: *volSource,
-testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name),
-podSec: v1.PodSecurityContext{
-FSGroup: fsGroup,
-},
-fileSizes: fileSizes,
-}
-}
-
-func (t *volumeIOTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-Context(getTestNameStr(t, pattern), func() {
-var (
-resource genericVolumeTestResource
-input volumeIOTestInput
-needsCleanup bool
-)
-
-BeforeEach(func() {
-needsCleanup = false
-// Skip unsupported tests to avoid unnecessary resource initialization
-skipUnsupportedTest(t, driver, pattern)
-needsCleanup = true
-
-// Setup test resource for driver and testpattern
-resource = genericVolumeTestResource{}
-resource.setupResource(driver, pattern)
-
-// Create test input
-input = createVolumeIOTestInput(pattern, resource)
-})
-
-AfterEach(func() {
-if needsCleanup {
-resource.cleanupResource(driver, pattern)
-}
-})
-
-execTestVolumeIO(&input)
-})
-}
-
-type volumeIOTestInput struct {
-f *framework.Framework
-name string
-config *TestConfig
-volSource v1.VolumeSource
-testFile string
-podSec v1.PodSecurityContext
-fileSizes []int64
-}
-
-func execTestVolumeIO(input *volumeIOTestInput) {
It("should write files of various sizes, verify size, validate content [Slow]", func() {
-f := input.f
-cs := f.ClientSet
-
-err := testVolumeIO(f, cs, convertTestConfig(input.config), input.volSource, &input.podSec, input.testFile, input.fileSizes)
+init()
+defer cleanup()
+
+cs := f.ClientSet
+fileSizes := createFileSizes(dInfo.MaxFileSize)
+testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name)
+var fsGroup *int64
+if dInfo.Capabilities[CapFsGroup] {
+fsGroupVal := int64(1234)
+fsGroup = &fsGroupVal
+}
+podSec := v1.PodSecurityContext{
+FSGroup: fsGroup,
+}
+err := testVolumeIO(f, cs, convertTestConfig(config), *resource.volSource, &podSec, testFile, fileSizes)
Expect(err).NotTo(HaveOccurred())
})
}
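defineTests no longer registers its own Context the way execTest did, so some common harness presumably iterates drivers and patterns and calls it once per combination. A hypothetical sketch of that wiring, where the function name, the testPatterns field, and the loop structure are assumptions:

    func defineTestSuite(driver TestDriver, suites []TestSuite) {
        for _, suite := range suites {
            for _, pattern := range suite.getTestSuiteInfo().testPatterns { // assumed field
                p := pattern // capture the loop variable for the closures below
                Context(getTestNameStr(suite, p), func() {
                    // registered before defineTests creates the framework, so this
                    // static skip runs ahead of the framework's own BeforeEach
                    BeforeEach(func() {
                        driver.SkipUnsupportedTest(p)
                    })
                    suite.defineTests(driver, p)
                })
            }
        }
    }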
@@ -61,317 +61,252 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
return t.tsInfo
}

-func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
-
-func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput {
-driver := resource.driver
-dInfo := driver.GetDriverInfo()
-f := dInfo.Config.Framework
-
-return volumeModeTestInput{
-f: f,
-sc: resource.sc,
-pvc: resource.pvc,
-pv: resource.pv,
-testVolType: pattern.VolType,
-nodeName: dInfo.Config.ClientNodeName,
-volMode: pattern.VolMode,
-isBlockSupported: dInfo.Capabilities[CapBlock],
-}
-}
-
-func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver TestDriver) func(*volumeModeTestInput) {
-dInfo := driver.GetDriverInfo()
-isBlockSupported := dInfo.Capabilities[CapBlock]
-volMode := pattern.VolMode
-volType := pattern.VolType
-
-switch volType {
-case testpatterns.PreprovisionedPV:
-if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
-return testVolumeModeFailForPreprovisionedPV
-}
-return testVolumeModeSuccessForPreprovisionedPV
-case testpatterns.DynamicPV:
-if volMode == v1.PersistentVolumeBlock && !isBlockSupported {
-return testVolumeModeFailForDynamicPV
-}
-return testVolumeModeSuccessForDynamicPV
-default:
-framework.Failf("Volume mode test doesn't support volType: %v", volType)
-}
-return nil
-}
-
-func (t *volumeModeTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-Context(getTestNameStr(t, pattern), func() {
-var (
-resource volumeModeTestResource
-input volumeModeTestInput
-testFunc func(*volumeModeTestInput)
-needsCleanup bool
-)
-
-testFunc = getVolumeModeTestFunc(pattern, driver)
-
-BeforeEach(func() {
-needsCleanup = false
-// Skip unsupported tests to avoid unnecessary resource initialization
-skipUnsupportedTest(t, driver, pattern)
-needsCleanup = true
-
-// Setup test resource for driver and testpattern
-resource = volumeModeTestResource{}
-resource.setupResource(driver, pattern)
-
-// Create test input
-input = createVolumeModeTestInput(pattern, resource)
-})
-
-AfterEach(func() {
-if needsCleanup {
-resource.cleanupResource(driver, pattern)
-}
-})
-
-testFunc(&input)
-})
-}
-
-type volumeModeTestResource struct {
-driver TestDriver
-
-sc *storagev1.StorageClass
-pvc *v1.PersistentVolumeClaim
-pv *v1.PersistentVolume
-
-volume TestVolume
-}
-
-var _ TestResource = &volumeModeTestResource{}
-
-func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-s.driver = driver
-dInfo := driver.GetDriverInfo()
-f := dInfo.Config.Framework
-ns := f.Namespace
-fsType := pattern.FsType
-volBindMode := storagev1.VolumeBindingImmediate
-volMode := pattern.VolMode
-volType := pattern.VolType
-
+func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
var (
-scName string
-pvSource *v1.PersistentVolumeSource
-volumeNodeAffinity *v1.VolumeNodeAffinity
+dInfo = driver.GetDriverInfo()
+config *PerTestConfig
+testCleanup func()
+sc *storagev1.StorageClass
+pvc *v1.PersistentVolumeClaim
+pv *v1.PersistentVolume
+volume TestVolume
)

-// Create volume for pre-provisioned volume tests
-s.volume = CreateVolume(driver, volType)
-
-switch volType {
-case testpatterns.PreprovisionedPV:
-if volMode == v1.PersistentVolumeBlock {
-scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
-} else if volMode == v1.PersistentVolumeFilesystem {
-scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
-}
-if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
-pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.volume)
-if pvSource == nil {
-framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
-}
-
-sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource, volumeNodeAffinity)
-s.sc = sc
-s.pv = framework.MakePersistentVolume(pvConfig)
-s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
+// No preconditions to test. Normally they would be in a BeforeEach here.
+
+// This intentionally comes after checking the preconditions because it
+// registers its own BeforeEach which creates the namespace. Beware that it
+// also registers an AfterEach which renders f unusable. Any code using
+// f must run inside an It or Context callback.
+f := framework.NewDefaultFramework("volumemode")
+
+init := func() {
+// Now do the more expensive test initialization.
+config, testCleanup = driver.PrepareTest(f)
+
+ns := f.Namespace
+fsType := pattern.FsType
+volBindMode := storagev1.VolumeBindingImmediate
+
+var (
+scName string
+pvSource *v1.PersistentVolumeSource
+volumeNodeAffinity *v1.VolumeNodeAffinity
+)
+
+// Create volume for pre-provisioned volume tests
+volume = CreateVolume(driver, config, pattern.VolType)
+
+switch pattern.VolType {
+case testpatterns.PreprovisionedPV:
+if pattern.VolMode == v1.PersistentVolumeBlock {
+scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
+} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
+scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
+}
+if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
+pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, volume)
+if pvSource == nil {
+framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
+}
+
+storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
+sc = storageClass
+pv = framework.MakePersistentVolume(pvConfig)
+pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
+}
+case testpatterns.DynamicPV:
+if dDriver, ok := driver.(DynamicPVTestDriver); ok {
+sc = dDriver.GetDynamicProvisionStorageClass(config, fsType)
+if sc == nil {
+framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
+}
+sc.VolumeBindingMode = &volBindMode
+
+claimSize := dDriver.GetClaimSize()
+pvc = getClaim(claimSize, ns.Name)
+pvc.Spec.StorageClassName = &sc.Name
+pvc.Spec.VolumeMode = &pattern.VolMode
+}
+default:
+framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
+}
+}
+
+cleanup := func() {
+if pv != nil || pvc != nil {
+By("Deleting pv and pvc")
+errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, pv, pvc)
+if len(errs) > 0 {
+framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
+}
+pv = nil
+pvc = nil
+}
+
+if sc != nil {
+By("Deleting sc")
+deleteStorageClass(f.ClientSet, sc.Name)
+sc = nil
+}
+
+if volume != nil {
+volume.DeleteVolume()
+volume = nil
+}
+
+if testCleanup != nil {
+testCleanup()
+testCleanup = nil
+}
+}
+
+// We register different tests depending on the driver.
+isBlockSupported := dInfo.Capabilities[CapBlock]
+switch pattern.VolType {
+case testpatterns.PreprovisionedPV:
+if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
+It("should fail to create pod by failing to mount volume", func() {
+init()
+defer cleanup()
+
+cs := f.ClientSet
+ns := f.Namespace
+var err error
+
+By("Creating sc")
+sc, err = cs.StorageV1().StorageClasses().Create(sc)
+Expect(err).NotTo(HaveOccurred())
+
+By("Creating pv and pvc")
+pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+Expect(err).NotTo(HaveOccurred())
+
+// Prebind pv
+pvc.Spec.VolumeName = pv.Name
+pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+Expect(err).NotTo(HaveOccurred())
+
+framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
+
+By("Creating pod")
+pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+false, "", false, false, framework.SELinuxLabel,
+nil, config.ClientNodeName, framework.PodStartTimeout)
+defer func() {
+framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+}()
+Expect(err).To(HaveOccurred())
+})
+} else {
+It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
+init()
+defer cleanup()
+
+cs := f.ClientSet
+ns := f.Namespace
+var err error
+
+By("Creating sc")
+sc, err = cs.StorageV1().StorageClasses().Create(sc)
+Expect(err).NotTo(HaveOccurred())
+
+By("Creating pv and pvc")
+pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+Expect(err).NotTo(HaveOccurred())
+
+// Prebind pv
+pvc.Spec.VolumeName = pv.Name
+pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+Expect(err).NotTo(HaveOccurred())
+
+framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
+
+By("Creating pod")
+pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+false, "", false, false, framework.SELinuxLabel,
+nil, config.ClientNodeName, framework.PodStartTimeout)
+defer func() {
+framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+}()
+Expect(err).NotTo(HaveOccurred())
+
+By("Checking if persistent volume exists as expected volume mode")
+utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
+
+By("Checking if read/write to persistent volume works properly")
+utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
+})
+// TODO(mkimuram): Add more tests
}
case testpatterns.DynamicPV:
-if dDriver, ok := driver.(DynamicPVTestDriver); ok {
-s.sc = dDriver.GetDynamicProvisionStorageClass(fsType)
-if s.sc == nil {
-framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
-}
-s.sc.VolumeBindingMode = &volBindMode
-
-claimSize := dDriver.GetClaimSize()
-s.pvc = getClaim(claimSize, ns.Name)
-s.pvc.Spec.StorageClassName = &s.sc.Name
-s.pvc.Spec.VolumeMode = &volMode
+if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
+It("should fail in binding dynamic provisioned PV to PVC", func() {
+init()
+defer cleanup()
+
+cs := f.ClientSet
+ns := f.Namespace
+var err error
+
+By("Creating sc")
+sc, err = cs.StorageV1().StorageClasses().Create(sc)
+Expect(err).NotTo(HaveOccurred())
+
+By("Creating pv and pvc")
+pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+Expect(err).NotTo(HaveOccurred())
+
+err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+Expect(err).To(HaveOccurred())
+})
+} else {
+It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
+init()
+defer cleanup()
+
+cs := f.ClientSet
+ns := f.Namespace
+var err error
+
+By("Creating sc")
+sc, err = cs.StorageV1().StorageClasses().Create(sc)
+Expect(err).NotTo(HaveOccurred())
+
+By("Creating pv and pvc")
+pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+Expect(err).NotTo(HaveOccurred())
+
+err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+Expect(err).NotTo(HaveOccurred())
+
+pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+Expect(err).NotTo(HaveOccurred())
+
+pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+Expect(err).NotTo(HaveOccurred())
+
+By("Creating pod")
+pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+false, "", false, false, framework.SELinuxLabel,
+nil, config.ClientNodeName, framework.PodStartTimeout)
+defer func() {
+framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+}()
+Expect(err).NotTo(HaveOccurred())
+
+By("Checking if persistent volume exists as expected volume mode")
+utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1")
+
+By("Checking if read/write to persistent volume works properly")
+utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1")
+})
+// TODO(mkimuram): Add more tests
}
default:
-framework.Failf("Volume mode test doesn't support: %s", volType)
+framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
-}
-}
-
-func (s *volumeModeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) {
-dInfo := driver.GetDriverInfo()
-f := dInfo.Config.Framework
-cs := f.ClientSet
-ns := f.Namespace
-
-By("Deleting pv and pvc")
-errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc)
-if len(errs) > 0 {
-framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
-}
-By("Deleting sc")
-if s.sc != nil {
-deleteStorageClass(cs, s.sc.Name)
}

-// Cleanup volume for pre-provisioned volume tests
-if s.volume != nil {
-s.volume.DeleteVolume()
-}
-}
-
-type volumeModeTestInput struct {
-f *framework.Framework
-sc *storagev1.StorageClass
-pvc *v1.PersistentVolumeClaim
-pv *v1.PersistentVolume
-testVolType testpatterns.TestVolType
-nodeName string
-volMode v1.PersistentVolumeMode
-isBlockSupported bool
-}
-
-func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) {
-It("should fail to create pod by failing to mount volume", func() {
-f := input.f
-cs := f.ClientSet
-ns := f.Namespace
-var err error
-
-By("Creating sc")
-input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
-Expect(err).NotTo(HaveOccurred())
-
-By("Creating pv and pvc")
-input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
-Expect(err).NotTo(HaveOccurred())
-
-// Prebind pv
-input.pvc.Spec.VolumeName = input.pv.Name
-input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
-Expect(err).NotTo(HaveOccurred())
-
-framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
-
-By("Creating pod")
-pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
-false, "", false, false, framework.SELinuxLabel,
-nil, input.nodeName, framework.PodStartTimeout)
-defer func() {
-framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
-}()
-Expect(err).To(HaveOccurred())
-})
-}
-
-func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) {
-It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
-f := input.f
-cs := f.ClientSet
-ns := f.Namespace
-var err error
-
-By("Creating sc")
-input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
-Expect(err).NotTo(HaveOccurred())
-
-By("Creating pv and pvc")
-input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv)
-Expect(err).NotTo(HaveOccurred())
-
-// Prebind pv
-input.pvc.Spec.VolumeName = input.pv.Name
-input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
-Expect(err).NotTo(HaveOccurred())
-
-framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc))
-
-By("Creating pod")
-pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
-false, "", false, false, framework.SELinuxLabel,
-nil, input.nodeName, framework.PodStartTimeout)
-defer func() {
-framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
-}()
-Expect(err).NotTo(HaveOccurred())
-
-By("Checking if persistent volume exists as expected volume mode")
-utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
-
-By("Checking if read/write to persistent volume works properly")
-utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
-})
-// TODO(mkimuram): Add more tests
-}
-
-func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) {
-It("should fail in binding dynamic provisioned PV to PVC", func() {
-f := input.f
-cs := f.ClientSet
-ns := f.Namespace
-var err error
-
-By("Creating sc")
-input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
-Expect(err).NotTo(HaveOccurred())
-
-By("Creating pv and pvc")
-input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
-Expect(err).NotTo(HaveOccurred())
-
-err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).To(HaveOccurred())
-})
-}
-
-func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) {
-It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() {
-f := input.f
-cs := f.ClientSet
-ns := f.Namespace
-var err error
-
-By("Creating sc")
-input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc)
-Expect(err).NotTo(HaveOccurred())
-
-By("Creating pv and pvc")
-input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc)
-Expect(err).NotTo(HaveOccurred())
-
-err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
-Expect(err).NotTo(HaveOccurred())
-
-input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
-
-input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
-
-By("Creating pod")
-pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc},
-false, "", false, false, framework.SELinuxLabel,
-nil, input.nodeName, framework.PodStartTimeout)
-defer func() {
-framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
-}()
-Expect(err).NotTo(HaveOccurred())
-
-By("Checking if persistent volume exists as expected volume mode")
-utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1")
-
-By("Checking if read/write to persistent volume works properly")
-utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1")
-})
-// TODO(mkimuram): Add more tests
}

func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
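Note how every branch of the cleanup closure above nils out the resource it has released. That makes cleanup safe to run after an init() that a Skipf or a failure interrupted halfway through, and safe to run twice. Distilled to its core (a sketch of the pattern, not the commit's code):

    cleanup := func() {
        if volume != nil {
            volume.DeleteVolume()
            volume = nil // guard: a second cleanup() call becomes a no-op
        }
        if testCleanup != nil {
            testCleanup()
            testCleanup = nil
        }
    }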
@@ -89,101 +89,76 @@ func skipExecTest(driver TestDriver) {
}
}

-func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
-var fsGroup *int64
-driver := resource.driver
-dInfo := driver.GetDriverInfo()
-f := dInfo.Config.Framework
-volSource := resource.volSource
-
-if volSource == nil {
-framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+var (
+dInfo = driver.GetDriverInfo()
+config *PerTestConfig
+testCleanup func()
+resource *genericVolumeTestResource
+)
+
+// No preconditions to test. Normally they would be in a BeforeEach here.
+
+// This intentionally comes after checking the preconditions because it
+// registers its own BeforeEach which creates the namespace. Beware that it
+// also registers an AfterEach which renders f unusable. Any code using
+// f must run inside an It or Context callback.
+f := framework.NewDefaultFramework("volumeio")
+
+init := func() {
+// Now do the more expensive test initialization.
+config, testCleanup = driver.PrepareTest(f)
+resource = createGenericVolumeTestResource(driver, config, pattern)
+if resource.volSource == nil {
+framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+}
}

-if dInfo.Capabilities[CapFsGroup] {
-fsGroupVal := int64(1234)
-fsGroup = &fsGroupVal
+cleanup := func() {
+if resource != nil {
+resource.cleanupResource()
+resource = nil
+}
+
+if testCleanup != nil {
+testCleanup()
+testCleanup = nil
+}
}

-return volumesTestInput{
-f: f,
-name: dInfo.Name,
-config: &dInfo.Config,
-fsGroup: fsGroup,
-resource: resource,
-fsType: pattern.FsType,
-tests: []framework.VolumeTest{
+It("should be mountable", func() {
+skipPersistenceTest(driver)
+init()
+defer func() {
+framework.VolumeTestCleanup(f, convertTestConfig(config))
+cleanup()
+}()
+
+tests := []framework.VolumeTest{
{
-Volume: *volSource,
+Volume: *resource.volSource,
File: "index.html",
// Must match content
ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
dInfo.Name, f.Namespace.Name),
},
-},
-}
-}
-
-func (t *volumesTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) {
-Context(getTestNameStr(t, pattern), func() {
-var (
-resource genericVolumeTestResource
-input volumesTestInput
-needsCleanup bool
-)
-
-BeforeEach(func() {
-needsCleanup = false
-// Skip unsupported tests to avoid unnecessary resource initialization
-skipUnsupportedTest(t, driver, pattern)
-needsCleanup = true
-
-// Setup test resource for driver and testpattern
-resource = genericVolumeTestResource{}
-resource.setupResource(driver, pattern)
-
-// Create test input
-input = createVolumesTestInput(pattern, resource)
-})
-
-AfterEach(func() {
-if needsCleanup {
-resource.cleanupResource(driver, pattern)
-}
-})
-
-testVolumes(&input)
+}
+config := convertTestConfig(config)
+framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent)
+var fsGroup *int64
+if dInfo.Capabilities[CapFsGroup] {
+fsGroupVal := int64(1234)
+fsGroup = &fsGroupVal
+}
+framework.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests)
})
-}
-
-type volumesTestInput struct {
-f *framework.Framework
-name string
-config *TestConfig
-fsGroup *int64
-fsType string
-tests []framework.VolumeTest
-resource genericVolumeTestResource
-}
-
-func testVolumes(input *volumesTestInput) {
-It("should be mountable", func() {
-f := input.f
-cs := f.ClientSet
-defer framework.VolumeTestCleanup(f, convertTestConfig(input.config))
-
-skipPersistenceTest(input.resource.driver)
-
-volumeTest := input.tests
-config := convertTestConfig(input.config)
-framework.InjectHtml(cs, config, volumeTest[0].Volume, volumeTest[0].ExpectedContent)
-framework.TestVolumeClient(cs, config, input.fsGroup, input.fsType, input.tests)
-})

It("should allow exec of files on the volume", func() {
-f := input.f
-skipExecTest(input.resource.driver)
+skipExecTest(driver)
+init()
+defer cleanup()

-testScriptInPod(f, input.resource.volType, input.resource.volSource, input.resource.driver.GetDriverInfo().Config.ClientNodeSelector)
+testScriptInPod(f, resource.volType, resource.volSource, config.ClientNodeSelector)
})
}
@@ -212,21 +212,22 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop
 	action := "creating claims with class with waitForFirstConsumer"
 	suffix := "delayed"
 	var topoZone string
-	class := newStorageClass(test, ns, suffix)
+	test.Client = c
+	test.Class = newStorageClass(test, ns, suffix)
 	if specifyAllowedTopology {
 		action += " and allowedTopologies"
 		suffix += "-topo"
 		topoZone = getRandomClusterZone(c)
-		addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone)
+		addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone)
 	}
 	By(action)
 	var claims []*v1.PersistentVolumeClaim
 	for i := 0; i < pvcCount; i++ {
 		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
+		claim.Spec.StorageClassName = &test.Class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
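For quick reference, the API change in this hunk condensed into before/after form, using only names that appear in the lines above:

// Before: free function, with the client and class threaded through as parameters.
pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)

// After: the StorageClassTest value carries Client and Class itself,
// so only the per-call arguments remain at the call site.
test.Client = c
test.Class = newStorageClass(test, ns, suffix)
pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)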
@@ -440,10 +441,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 				By("Testing " + test.Name)
 				suffix := fmt.Sprintf("%d", i)
-				class := newStorageClass(test, ns, suffix)
-				claim := newClaim(test, ns, suffix)
-				claim.Spec.StorageClassName = &class.Name
-				testsuites.TestDynamicProvisioning(test, c, claim, class)
+				test.Client = c
+				test.Class = newStorageClass(test, ns, suffix)
+				test.Claim = newClaim(test, ns, suffix)
+				test.Claim.Spec.StorageClassName = &test.Class.Name
+				test.TestDynamicProvisioning()
 			}
 
 			// Run the last test with storage.k8s.io/v1beta1 on pvc
@@ -455,9 +457,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				Expect(err).NotTo(HaveOccurred())
 				defer deleteStorageClass(c, class.Name)
 
-				claim := newClaim(*betaTest, ns, "beta")
-				claim.Spec.StorageClassName = &(class.Name)
-				testsuites.TestDynamicProvisioning(*betaTest, c, claim, nil)
+				betaTest.Client = c
+				betaTest.Class = nil
+				betaTest.Claim = newClaim(*betaTest, ns, "beta")
+				betaTest.Claim.Spec.StorageClassName = &(class.Name)
+				(*betaTest).TestDynamicProvisioning()
 			}
 		})
 
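The remaining dynamic-provisioning hunks all apply the same mechanical migration, so here it is once in condensed form; a sketch assuming only what these call sites show (the literal field values are placeholders):

test := testsuites.StorageClassTest{
	Client:       c,              // previously a parameter of testsuites.TestDynamicProvisioning
	Name:         "example test", // placeholder
	ClaimSize:    "2Gi",          // placeholder
	ExpectedSize: "2Gi",          // placeholder
}
test.Class = newStorageClass(test, ns, suffix)
test.Claim = newClaim(test, ns, suffix)
test.Claim.Spec.StorageClassName = &test.Class.Name
// Was: testsuites.TestDynamicProvisioning(test, c, claim, class)
test.TestDynamicProvisioning()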
@@ -465,6 +469,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			framework.SkipUnlessProviderIs("gce", "gke")
 
 			test := testsuites.StorageClassTest{
+				Client:         c,
 				Name:           "HDD PD on GCE/GKE",
 				CloudProviders: []string{"gce", "gke"},
 				Provisioner:    "kubernetes.io/gce-pd",
@@ -479,12 +484,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 					testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{})
 				},
 			}
-			class := newStorageClass(test, ns, "reclaimpolicy")
+			test.Class = newStorageClass(test, ns, "reclaimpolicy")
 			retain := v1.PersistentVolumeReclaimRetain
-			class.ReclaimPolicy = &retain
-			claim := newClaim(test, ns, "reclaimpolicy")
-			claim.Spec.StorageClassName = &class.Name
-			pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+			test.Class.ReclaimPolicy = &retain
+			test.Claim = newClaim(test, ns, "reclaimpolicy")
+			test.Claim.Spec.StorageClassName = &test.Class.Name
+			pv := test.TestDynamicProvisioning()
 
 			By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
 			framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
@@ -718,17 +723,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 			By("creating a StorageClass")
 			test := testsuites.StorageClassTest{
+				Client:       c,
 				Name:         "external provisioner test",
 				Provisioner:  externalPluginName,
 				ClaimSize:    "1500Mi",
 				ExpectedSize: "1500Mi",
 			}
-			class := newStorageClass(test, ns, "external")
-			claim := newClaim(test, ns, "external")
-			claim.Spec.StorageClassName = &(class.Name)
+			test.Class = newStorageClass(test, ns, "external")
+			test.Claim = newClaim(test, ns, "external")
+			test.Claim.Spec.StorageClassName = &test.Class.Name
 
 			By("creating a claim with a external provisioning annotation")
-			testsuites.TestDynamicProvisioning(test, c, claim, class)
+			test.TestDynamicProvisioning()
 		})
 	})
 
|
|||||||
|
|
||||||
By("creating a claim with no annotation")
|
By("creating a claim with no annotation")
|
||||||
test := testsuites.StorageClassTest{
|
test := testsuites.StorageClassTest{
|
||||||
|
Client: c,
|
||||||
Name: "default",
|
Name: "default",
|
||||||
ClaimSize: "2Gi",
|
ClaimSize: "2Gi",
|
||||||
ExpectedSize: "2Gi",
|
ExpectedSize: "2Gi",
|
||||||
}
|
}
|
||||||
|
|
||||||
claim := newClaim(test, ns, "default")
|
test.Claim = newClaim(test, ns, "default")
|
||||||
testsuites.TestDynamicProvisioning(test, c, claim, nil)
|
test.TestDynamicProvisioning()
|
||||||
})
|
})
|
||||||
|
|
||||||
// Modifying the default storage class can be disruptive to other tests that depend on it
|
// Modifying the default storage class can be disruptive to other tests that depend on it
|
||||||
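The "default" case above is the nil-Class variant of the same migration: only Claim is set and no StorageClass is created, so the claim presumably binds via the cluster's default StorageClass, mirroring the removed testsuites.TestDynamicProvisioning(test, c, claim, nil) call:

// Nil-Class variant (sketch): Class stays unset, so TestDynamicProvisioning
// creates no class and the claim falls back to the default StorageClass
// (presumed behavior, inferred from the nil class argument it replaces).
test.Claim = newClaim(test, ns, "default")
test.TestDynamicProvisioning()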
@@ -817,6 +824,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			serverUrl := "http://" + pod.Status.PodIP + ":8081"
 			By("creating a StorageClass")
 			test := testsuites.StorageClassTest{
+				Client:      c,
 				Name:        "Gluster Dynamic provisioner test",
 				Provisioner: "kubernetes.io/glusterfs",
 				ClaimSize:   "2Gi",
@@ -824,13 +832,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				Parameters: map[string]string{"resturl": serverUrl},
 			}
 			suffix := fmt.Sprintf("glusterdptest")
-			class := newStorageClass(test, ns, suffix)
+			test.Class = newStorageClass(test, ns, suffix)
 
 			By("creating a claim object with a suffix for gluster dynamic provisioner")
-			claim := newClaim(test, ns, suffix)
-			claim.Spec.StorageClassName = &class.Name
+			test.Claim = newClaim(test, ns, suffix)
+			test.Claim.Spec.StorageClassName = &test.Class.Name
 
-			testsuites.TestDynamicProvisioning(test, c, claim, class)
+			test.TestDynamicProvisioning()
 		})
 	})
 
@@ -929,12 +937,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			}
 			By("creating a claim with class with allowedTopologies set")
 			suffix := "topology"
-			class := newStorageClass(test, ns, suffix)
+			test.Client = c
+			test.Class = newStorageClass(test, ns, suffix)
 			zone := getRandomClusterZone(c)
-			addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
-			claim := newClaim(test, ns, suffix)
-			claim.Spec.StorageClassName = &class.Name
-			pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+			addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
+			test.Claim = newClaim(test, ns, suffix)
+			test.Claim.Spec.StorageClassName = &test.Class.Name
+			pv := test.TestDynamicProvisioning()
 			checkZoneFromLabelAndAffinity(pv, zone, true)
 		}
 	})