Merge pull request #93710 from Jiawei0227/attachable2non
Detect volume attach-ability in the middle of attaching
@@ -42,7 +42,9 @@ import (
    cachetools "k8s.io/client-go/tools/cache"
    watchtools "k8s.io/client-go/tools/watch"

    "k8s.io/kubernetes/pkg/controller/volume/scheduling"
    "k8s.io/kubernetes/pkg/kubelet/events"
    "k8s.io/kubernetes/test/e2e/framework"
    e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    "k8s.io/kubernetes/test/e2e/storage/drivers"
@@ -58,6 +60,7 @@ const (
    csiNodeLimitUpdateTimeout  = 5 * time.Minute
    csiPodUnschedulableTimeout = 5 * time.Minute
    csiResizeWaitPeriod        = 5 * time.Minute
    csiVolumeAttachmentTimeout = 7 * time.Minute
    // how long to wait for Resizing Condition on PVC to appear
    csiResizingConditionWait = 2 * time.Minute

@@ -307,6 +310,77 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
        }
    })

    ginkgo.Context("CSI CSIDriver deployment after pod creation using non-attachable mock driver", func() {
        ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func() {
            var err error
            init(testParameters{registerDriver: false, disableAttach: true})
            defer cleanup()

            _, claim, pod := createPod(false /* persistent volume, late binding as specified above */)
            if pod == nil {
                return
            }

            ginkgo.By("Checking if attaching failed and pod cannot start")
            eventSelector := fields.Set{
                "involvedObject.kind":      "Pod",
                "involvedObject.name":      pod.Name,
                "involvedObject.namespace": pod.Namespace,
                "reason":                   events.FailedAttachVolume,
            }.AsSelector().String()
            msg := "AttachVolume.Attach failed for volume"

            err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, framework.PodStartTimeout)
            if err != nil {
                podErr := e2epod.WaitTimeoutForPodRunningInNamespace(m.cs, pod.Name, pod.Namespace, 10*time.Second)
                framework.ExpectError(podErr, "Pod should not be in running status because attaching should fail")
                // Events are unreliable, don't depend on the event. It's used only to speed up the test.
                framework.Logf("Attach should fail and the corresponding event should show up, error: %v", err)
            }

            // VolumeAttachment should be created because the default value for CSI attachable is true
            ginkgo.By("Checking if VolumeAttachment was created for the pod")
            handle := getVolumeHandle(m.cs, claim)
            attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeSelection.Name)))
            attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
            _, err = m.cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
            if err != nil {
                if apierrors.IsNotFound(err) {
                    framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
                } else {
                    framework.ExpectNoError(err, "Failed to find VolumeAttachment")
                }
            }

            ginkgo.By("Deploy CSIDriver object with attachRequired=false")
            driverNamespace := m.config.DriverNamespace

            canAttach := false
            o := utils.PatchCSIOptions{
                OldDriverName: "csi-mock",
                NewDriverName: "csi-mock-" + f.UniqueName,
                CanAttach:     &canAttach,
            }
            cleanupCSIDriver, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
                return utils.PatchCSIDeployment(f, o, item)
            }, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml")
            if err != nil {
                framework.Failf("failed to deploy CSIDriver object: %v", err)
            }
            m.testCleanups = append(m.testCleanups, cleanupCSIDriver)

            ginkgo.By("Wait for the pod to be in running status")
            err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
            framework.ExpectNoError(err, "Failed to start pod: %v", err)

            ginkgo.By(fmt.Sprintf("Wait for the volumeattachment to be deleted up to %v", csiVolumeAttachmentTimeout))
            // This step can be slow because we have to wait until either a NodeUpdate event happens or
            // the detachment for this volume times out, so that we can do a force detach.
            err = waitForVolumeAttachmentTerminated(attachmentName, m.cs)
            framework.ExpectNoError(err, "Failed to delete VolumeAttachment: %v", err)
        })
    })

    ginkgo.Context("CSI workload information using mock driver", func() {
        var (
            err error
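The attachmentName computed above mirrors how Kubernetes names VolumeAttachment objects for CSI volumes: a sha256 hash over the concatenation of volume handle, driver (provisioner) name, and node name, hex-encoded with a "csi-" prefix. A minimal standalone sketch of that derivation, with a hypothetical function name:

    import (
        "crypto/sha256"
        "fmt"
    )

    // expectedAttachmentName reproduces the hash the test computes above:
    // sha256 over volumeHandle + driverName + nodeName, printed as csi-<hex>.
    func expectedAttachmentName(volumeHandle, driverName, nodeName string) string {
        hash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", volumeHandle, driverName, nodeName)))
        return fmt.Sprintf("csi-%x", hash)
    }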
@@ -1186,6 +1260,24 @@ func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
    return nil
}

func waitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface) error {
    waitErr := wait.PollImmediate(10*time.Second, csiVolumeAttachmentTimeout, func() (bool, error) {
        _, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
        if err != nil {
            // if the volumeattachment object is not found, it means it has been terminated.
            if apierrors.IsNotFound(err) {
                return true, nil
            }
            return false, err
        }
        return false, nil
    })
    if waitErr != nil {
        return fmt.Errorf("error waiting for volume attachment %v to terminate: %v", attachmentName, waitErr)
    }
    return nil
}
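
// For illustration only: the test flips attach-ability by patching the
// deployed manifests through utils.PatchCSIDeployment. An equivalent
// CSIDriver object with attachRequired=false could also be created directly
// with client-go; the helper below is a hypothetical sketch, reusing this
// file's clientset, storagev1, metav1, and context imports.
func createNonAttachableCSIDriver(cs clientset.Interface, driverName string) error {
    attachRequired := false
    driver := &storagev1.CSIDriver{
        ObjectMeta: metav1.ObjectMeta{Name: driverName},
        Spec: storagev1.CSIDriverSpec{
            // attachRequired=false tells Kubernetes to skip ControllerPublishVolume,
            // so no VolumeAttachment object is needed for this driver.
            AttachRequired: &attachRequired,
        },
    }
    _, err := cs.StorageV1().CSIDrivers().Create(context.TODO(), driver, metav1.CreateOptions{})
    return err
}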

func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Interface) (int32, error) {
    var attachLimit int32