Merge branch 'master' into fix-typos
pkg/controller/volume/attachdetach/OWNERS (Normal file, 2 lines)
@@ -0,0 +1,2 @@
assignees:
- saad-ali
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ limitations under the License.

// Package volume implements a controller to manage volume attach and detach
// operations.
package volume
package attachdetach

import (
"fmt"
@@ -28,10 +28,10 @@ import (
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/populator"
"k8s.io/kubernetes/pkg/controller/volume/reconciler"
"k8s.io/kubernetes/pkg/controller/volume/statusupdater"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/populator"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/reconciler"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
@@ -50,7 +50,7 @@ const (
// attach detach controller will wait for a volume to be safely unmounted
// from its node. Once this time has expired, the controller will assume the
// node or kubelet are unresponsive and will detach the volume anyway.
reconcilerMaxWaitForUnmountDuration time.Duration = 3 * time.Minute
reconcilerMaxWaitForUnmountDuration time.Duration = 6 * time.Minute

// desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
// DesiredStateOfWorldPopulator loop waits between successive executions
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,14 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package volume
package attachdetach

import (
"testing"
"time"

"k8s.io/kubernetes/pkg/controller/framework/informers"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
)

func Test_NewAttachDetachController_Positive(t *testing.T) {
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
)
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,7 +20,7 @@ import (
"testing"

"k8s.io/kubernetes/pkg/api"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/types"
)
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@ import (
"k8s.io/kubernetes/pkg/api"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -23,8 +23,8 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/statusupdater"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
"k8s.io/kubernetes/pkg/util/goroutinemap"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -92,9 +92,24 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
if !rc.desiredStateOfWorld.VolumeExists(
attachedVolume.VolumeName, attachedVolume.NodeName) {
// Volume exists in actual state of world but not desired

// Mark desire to detach
timeElapsed, err := rc.actualStateOfWorld.MarkDesireToDetach(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
glog.Errorf("Unexpected error actualStateOfWorld.MarkDesireToDetach(): %v", err)
}

// Update Node Status to indicate volume is no longer safe to mount.
err = rc.nodeStatusUpdater.UpdateNodeStatuses()
if err != nil {
// Skip detaching this volume if unable to update node status
glog.Infof("UpdateNodeStatuses failed with: %v", err)
continue
}

if !attachedVolume.MountedByNode {
glog.V(5).Infof("Attempting to start DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, rc.actualStateOfWorld)
err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, true /* verifySafeToDetach */, rc.actualStateOfWorld)
if err == nil {
glog.Infof("Started DetachVolume for volume %q from node %q", attachedVolume.VolumeName, attachedVolume.NodeName)
}
@@ -112,13 +127,9 @@ func (rc *reconciler) reconciliationLoopFunc() func() {
}
} else {
// If volume is not safe to detach (is mounted) wait a max amount of time before detaching any way.
timeElapsed, err := rc.actualStateOfWorld.MarkDesireToDetach(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
glog.Errorf("Unexpected error actualStateOfWorld.MarkDesireToDetach(): %v", err)
}
if timeElapsed > rc.maxWaitForUnmountDuration {
glog.V(5).Infof("Attempting to start DetachVolume for volume %q from node %q. Volume is not safe to detach, but maxWaitForUnmountDuration expired.", attachedVolume.VolumeName, attachedVolume.NodeName)
err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, rc.actualStateOfWorld)
err := rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
if err == nil {
glog.Infof("Started DetachVolume for volume %q from node %q due to maxWaitForUnmountDuration expiry.", attachedVolume.VolumeName, attachedVolume.NodeName)
}
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,9 +22,9 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/controller/framework/informers"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/statusupdater"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/testing"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
"k8s.io/kubernetes/pkg/util/wait"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -75,10 +75,7 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr)
nodeInformer := informers.CreateSharedNodeIndexInformer(
fakeKubeClient, resyncPeriod)
nsu := statusupdater.NewNodeStatusUpdater(
fakeKubeClient, nodeInformer, asw)
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad, nsu)
podName := "pod-uid"
@@ -121,10 +118,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr)
nodeInformer := informers.CreateSharedNodeIndexInformer(
fakeKubeClient, resyncPeriod)
nsu := statusupdater.NewNodeStatusUpdater(
fakeKubeClient, nodeInformer, asw)
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad, nsu)
podName := "pod-uid"
@@ -188,10 +182,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr)
nodeInformer := informers.CreateSharedNodeIndexInformer(
fakeKubeClient, resyncPeriod)
nsu := statusupdater.NewNodeStatusUpdater(
fakeKubeClient, nodeInformer, asw)
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad, nsu)
podName := "pod-uid"
@@ -241,6 +232,72 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
waitForDetachCallCount(t, 1 /* expectedDetachCallCount */, fakePlugin)
}

// Populates desiredStateOfWorld cache with one node/volume/pod tuple.
// Has node update fail
// Calls Run()
// Verifies there is one attach call and no detach calls.
// Marks the node/volume as unmounted.
// Deletes the node/volume/pod tuple from desiredStateOfWorld cache.
// Verifies there are NO detach call and no (new) attach calls.
func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdateStatusFail(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(volumePluginMgr)
fakeKubeClient := controllervolumetesting.CreateTestClient()
ad := operationexecutor.NewOperationExecutor(fakeKubeClient, volumePluginMgr)
nsu := statusupdater.NewFakeNodeStatusUpdater(true /* returnError */)
reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, dsw, asw, ad, nsu)
podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := "node-name"
dsw.AddNode(nodeName)
volumeExists := dsw.VolumeExists(volumeName, nodeName)
if volumeExists {
t.Fatalf(
"Volume %q/node %q should not exist, but it does.",
volumeName,
nodeName)
}

generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podAddErr != nil {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
}

// Act
go reconciler.Run(wait.NeverStop)

// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)

// Act
dsw.DeletePod(types.UniquePodName(podName), generatedVolumeName, nodeName)
volumeExists = dsw.VolumeExists(generatedVolumeName, nodeName)
if volumeExists {
t.Fatalf(
"Deleted pod %q from volume %q/node %q. Volume should also be deleted but it still exists.",
podName,
generatedVolumeName,
nodeName)
}
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)

// Assert
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
verifyNewAttacherCallCount(t, false /* expectZeroNewAttacherCallCount */, fakePlugin)
waitForAttachCallCount(t, 1 /* expectedAttachCallCount */, fakePlugin)
verifyNewDetacherCallCount(t, false /* expectZeroNewDetacherCallCount */, fakePlugin)
waitForDetachCallCount(t, 0 /* expectedDetachCallCount */, fakePlugin)
}

func waitForNewAttacherCallCount(
t *testing.T,
expectedCallCount int,
@@ -0,0 +1,39 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package statusupdater

import (
"fmt"
)

func NewFakeNodeStatusUpdater(returnError bool) NodeStatusUpdater {
return &fakeNodeStatusUpdater{
returnError: returnError,
}
}

type fakeNodeStatusUpdater struct {
returnError bool
}

func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error {
if fnsu.returnError {
return fmt.Errorf("fake error on update node status")
}

return nil
}
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/controller/volume/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/strategicpatch"
)
@@ -121,6 +121,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {

glog.V(3).Infof(
"Updating status for node %q succeeded. patchBytes: %q",
nodeName,
string(patchBytes))
}
return nil
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
pkg/controller/volume/persistentvolume/OWNERS (Normal file, 4 lines)
@@ -0,0 +1,4 @@
assignees:
- jsafrane
- saad-ali
- thockin
pkg/controller/volume/persistentvolume/binder_test.go (Normal file, 473 lines)
@@ -0,0 +1,473 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
"testing"

"k8s.io/kubernetes/pkg/api"
)

// Test single call to syncClaim and syncVolume methods.
// 1. Fill in the controller with initial data
// 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims.
func TestSync(t *testing.T) {
labels := map[string]string{
"foo": "true",
"bar": "false",
}

tests := []controllerTest{
// [Unit test set 1] User did not care which PV they get.
// Test the matching with no claim.Spec.VolumeName and with various
// volumes.
{
// syncClaim binds to a matching unbound volume.
"1-1 - successful bind",
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not do anything when there is no matching volume.
"1-2 - noop",
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim resets claim.Status to Pending when there is no
// matching volume.
"1-3 - reset to Pending",
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds claims to the smallest matching volume
"1-4 - smallest volume",
[]*api.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
},
newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only to volume that points to it (by
// name), even though a smaller one is available.
"1-5 - prebound volume by name - success",
[]*api.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only to volume that points to it (by
// UID), even though a smaller one is available.
"1-6 - prebound volume by UID - success",
[]*api.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
},
newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind claim to a volume prebound to a claim with
// same name and different UID
"1-7 - prebound volume to different claim",
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PV.ClaimRef is saved
"1-8 - complete bind after crash - PV bound",
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PV.Status is saved
"1-9 - complete bind after crash - PV status saved",
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending),
newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim completes binding - simulates controller crash after
// PVC.VolumeName is saved
"1-10 - complete bind after crash - PVC bound",
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim binds a claim only when the label selector matches the volume
"1-11 - bind when selector matches",
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim does not bind a claim when the label selector doesn't match
"1-12 - do not bind when selector does not match",
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)),
noevents, noerrors, testSyncClaim,
},

// [Unit test set 2] User asked for a specific PV.
// Test the binding when pv.ClaimRef is already set by controller or
// by user.
{
// syncClaim with claim pre-bound to a PV that does not exist
"2-1 - claim prebound to non-existing volume - noop",
novolumes,
novolumes,
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending),
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that does not exist.
// Check that the claim status is reset to Pending
"2-2 - claim prebound to non-existing volume - reset status",
novolumes,
novolumes,
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound),
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound. Check it gets bound and no annBoundByController is set.
"2-3 - claim prebound to unbound volume",
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending),
newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// claim with claim pre-bound to a PV that is pre-bound to the claim
// by name. Check it gets bound and no annBoundByController is set.
"2-4 - claim prebound to prebound volume by name",
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimPending),
newClaimArray("claim2-4", "uid2-4", "10Gi", "volume2-4", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that is pre-bound to the
// claim by UID. Check it gets bound and no annBoundByController is
// set.
"2-5 - claim prebound to prebound volume by UID",
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimPending),
newClaimArray("claim2-5", "uid2-5", "10Gi", "volume2-5", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim pre-bound to a PV that is bound to different
// claim. Check it's reset to Pending.
"2-6 - claim prebound to already bound volume",
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimBound),
newClaimArray("claim2-6", "uid2-6", "10Gi", "volume2-6", api.ClaimPending),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound by controller to a PV that is bound to
// different claim. Check it throws an error.
"2-7 - claim bound by controller to already bound volume",
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController),
newClaimArray("claim2-7", "uid2-7", "10Gi", "volume2-7", api.ClaimBound, annBoundByController),
noevents, noerrors, testSyncClaimError,
},
{
// syncClaim with claim pre-bound to a PV that exists and is
// unbound, but does not match the selector. Check it gets bound
// and no annBoundByController is set.
"2-8 - claim prebound to unbound volume that does not match the selector",
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimPending)),
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "10Gi", "volume2-3", api.ClaimBound, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},

// [Unit test set 3] Syncing bound claim
{
// syncClaim with claim bound and its claim.Spec.VolumeName is
// removed. Check it's marked as Lost.
"3-1 - bound claim with missing VolumeName",
novolumes,
novolumes,
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to non-existing volume. Check it's
// marked as Lost.
"3-2 - bound claim with missing volume",
novolumes,
novolumes,
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to unbound volume. Check it's bound.
// Also check that Pending phase is set to Bound
"3-3 - bound claim with unbound volume",
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to volume with missing (or different)
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
"3-4 - bound claim with prebound volume",
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to bound volume. Check that the
// controller does not do anything. Also check that Pending phase is
// set to Bound
"3-5 - bound claim with bound volume",
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to a volume that is bound to different
// claim. Check that the claim is marked as lost.
// TODO: test that an event is emitted
"3-6 - bound claim with bound volume",
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
},
{
// syncClaim with claim bound to unbound volume. Check it's bound
// even if the claim's selector doesn't match the volume. Also
// check that Pending phase is set to Bound
"3-7 - bound claim with unbound volume where selector doesn't match",
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted)),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
// [Unit test set 4] All syncVolume tests.
{
// syncVolume with pending volume. Check it's marked as Available.
"4-1 - pending volume",
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with prebound pending volume. Check it's marked as
// Available.
"4-2 - pending prebound volume",
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to missing claim.
// Check the volume gets Released
"4-3 - bound volume with missing claim",
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to claim with different UID.
// Check the volume gets Released.
"4-4 - volume bound to claim with different UID",
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by controller to unbound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by user to bound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to bound claim.
// Check that the volume is marked as Bound.
"4-6 - volume bound by to bound claim",
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to claim bound to
// another volume. Check that the volume is rolled back.
"4-7 - volume bound by controller to claim bound somewhere else",
newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to claim bound to
// another volume. Check that the volume is marked as Available
// and its UID is reset.
"4-8 - volume bound by user to claim bound somewhere else",
newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound),
noevents, noerrors, testSyncVolume,
},
}
runSyncTests(t, tests)
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
// events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
// "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
// of volumes/claims with expected claims/volumes and report differences.
// Some limit of calls in enforced to prevent endless loops.
func TestMultiSync(t *testing.T) {
tests := []controllerTest{
// Test simple binding
{
// syncClaim binds to a matching unbound volume.
"10-1 - successful bind",
newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending),
newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// Two controllers bound two PVs to single claim. Test one of them
// wins and the second rolls back.
"10-2 - bind PV race",
[]*api.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
},
[]*api.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
},
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
}

runMultisyncTests(t, tests)
}
pkg/controller/volume/persistentvolume/controller.go (Normal file, 1242 lines)
File diff suppressed because it is too large
pkg/controller/volume/persistentvolume/controller_base.go (Normal file, 580 lines)
@@ -0,0 +1,580 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/conversion"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/goroutinemap"
|
||||
vol "k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// This file contains the controller base functionality, i.e. framework to
|
||||
// process PV/PVC added/updated/deleted events. The real binding, provisioning,
|
||||
// recycling and deleting is done in controller.go
|
||||
|
||||
// NewPersistentVolumeController creates a new PersistentVolumeController
|
||||
func NewPersistentVolumeController(
|
||||
kubeClient clientset.Interface,
|
||||
syncPeriod time.Duration,
|
||||
provisioner vol.ProvisionableVolumePlugin,
|
||||
recyclers []vol.VolumePlugin,
|
||||
cloud cloudprovider.Interface,
|
||||
clusterName string,
|
||||
volumeSource, claimSource cache.ListerWatcher,
|
||||
eventRecorder record.EventRecorder,
|
||||
enableDynamicProvisioning bool,
|
||||
) *PersistentVolumeController {
|
||||
|
||||
if eventRecorder == nil {
|
||||
broadcaster := record.NewBroadcaster()
|
||||
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
|
||||
eventRecorder = broadcaster.NewRecorder(api.EventSource{Component: "persistentvolume-controller"})
|
||||
}
|
||||
|
||||
controller := &PersistentVolumeController{
|
||||
volumes: newPersistentVolumeOrderedIndex(),
|
||||
claims: cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc),
|
||||
kubeClient: kubeClient,
|
||||
eventRecorder: eventRecorder,
|
||||
runningOperations: goroutinemap.NewGoRoutineMap(false /* exponentialBackOffOnError */),
|
||||
cloud: cloud,
|
||||
provisioner: provisioner,
|
||||
enableDynamicProvisioning: enableDynamicProvisioning,
|
||||
clusterName: clusterName,
|
||||
createProvisionedPVRetryCount: createProvisionedPVRetryCount,
|
||||
createProvisionedPVInterval: createProvisionedPVInterval,
|
||||
}
|
||||
|
||||
controller.recyclePluginMgr.InitPlugins(recyclers, controller)
|
||||
if controller.provisioner != nil {
|
||||
if err := controller.provisioner.Init(controller); err != nil {
|
||||
glog.Errorf("PersistentVolumeController: error initializing provisioner plugin: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if volumeSource == nil {
|
||||
volumeSource = &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().PersistentVolumes().List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return kubeClient.Core().PersistentVolumes().Watch(options)
|
||||
},
|
||||
}
|
||||
}
|
||||
controller.volumeSource = volumeSource
|
||||
|
||||
if claimSource == nil {
|
||||
claimSource = &cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
|
||||
},
|
||||
}
|
||||
}
|
||||
controller.claimSource = claimSource
|
||||
|
||||
_, controller.volumeController = framework.NewIndexerInformer(
|
||||
volumeSource,
|
||||
&api.PersistentVolume{},
|
||||
syncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
AddFunc: controller.addVolume,
|
||||
UpdateFunc: controller.updateVolume,
|
||||
DeleteFunc: controller.deleteVolume,
|
||||
},
|
||||
cache.Indexers{"accessmodes": accessModesIndexFunc},
|
||||
)
|
||||
_, controller.claimController = framework.NewInformer(
|
||||
claimSource,
|
||||
&api.PersistentVolumeClaim{},
|
||||
syncPeriod,
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
AddFunc: controller.addClaim,
|
||||
UpdateFunc: controller.updateClaim,
|
||||
DeleteFunc: controller.deleteClaim,
|
||||
},
|
||||
)
|
||||
return controller
|
||||
}
|
||||
|
||||
// initializeCaches fills all controller caches with initial data from etcd in
|
||||
// order to have the caches already filled when first addClaim/addVolume to
|
||||
// perform initial synchronization of the controller.
|
||||
func (ctrl *PersistentVolumeController) initializeCaches(volumeSource, claimSource cache.ListerWatcher) {
|
||||
volumeListObj, err := volumeSource.List(api.ListOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
return
|
||||
}
|
||||
volumeList, ok := volumeListObj.(*api.PersistentVolumeList)
|
||||
if !ok {
|
||||
glog.Errorf("PersistentVolumeController can't initialize caches, expected list of volumes, got: %+v", volumeListObj)
|
||||
return
|
||||
}
|
||||
for _, volume := range volumeList.Items {
|
||||
// Ignore template volumes from kubernetes 1.2
|
||||
deleted := ctrl.upgradeVolumeFrom1_2(&volume)
|
||||
if !deleted {
|
||||
clone, err := conversion.NewCloner().DeepCopy(&volume)
|
||||
if err != nil {
|
||||
glog.Errorf("error cloning volume %q: %v", volume.Name, err)
|
||||
continue
|
||||
}
|
||||
volumeClone := clone.(*api.PersistentVolume)
|
||||
ctrl.storeVolumeUpdate(volumeClone)
|
||||
}
|
||||
}
|
||||
|
||||
claimListObj, err := claimSource.List(api.ListOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
|
||||
return
|
||||
}
|
||||
claimList, ok := claimListObj.(*api.PersistentVolumeClaimList)
|
||||
if !ok {
|
||||
glog.Errorf("PersistentVolumeController can't initialize caches, expected list of claims, got: %+v", claimListObj)
|
||||
return
|
||||
}
|
||||
for _, claim := range claimList.Items {
|
||||
clone, err := conversion.NewCloner().DeepCopy(&claim)
|
||||
if err != nil {
|
||||
glog.Errorf("error cloning claim %q: %v", claimToClaimKey(&claim), err)
|
||||
continue
|
||||
}
|
||||
claimClone := clone.(*api.PersistentVolumeClaim)
|
||||
ctrl.storeClaimUpdate(claimClone)
|
||||
}
|
||||
glog.V(4).Infof("controller initialized")
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) storeVolumeUpdate(volume *api.PersistentVolume) (bool, error) {
|
||||
return storeObjectUpdate(ctrl.volumes.store, volume, "volume")
|
||||
}
|
||||
|
||||
func (ctrl *PersistentVolumeController) storeClaimUpdate(claim *api.PersistentVolumeClaim) (bool, error) {
|
||||
return storeObjectUpdate(ctrl.claims, claim, "claim")
|
||||
}
|
||||
|
||||
// addVolume is callback from framework.Controller watching PersistentVolume
|
||||
// events.
|
||||
func (ctrl *PersistentVolumeController) addVolume(obj interface{}) {
|
||||
pv, ok := obj.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
glog.Errorf("expected PersistentVolume but handler received %+v", obj)
|
||||
return
|
||||
}
|
||||
|
||||
if ctrl.upgradeVolumeFrom1_2(pv) {
|
||||
// volume deleted
|
||||
return
|
||||
}
|
||||
|
||||
// Store the new volume version in the cache and do not process it if this
|
||||
// is an old version.
|
||||
new, err := ctrl.storeVolumeUpdate(pv)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
}
|
||||
|
||||
if err := ctrl.syncVolume(pv); err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
glog.V(3).Infof("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
|
||||
} else {
|
||||
glog.Errorf("PersistentVolumeController could not add volume %q: %+v", pv.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateVolume is callback from framework.Controller watching PersistentVolume
|
||||
// events.
|
||||
func (ctrl *PersistentVolumeController) updateVolume(oldObj, newObj interface{}) {
|
||||
newVolume, ok := newObj.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
glog.Errorf("Expected PersistentVolume but handler received %+v", newObj)
|
||||
return
|
||||
}
|
||||
|
||||
if ctrl.upgradeVolumeFrom1_2(newVolume) {
|
||||
// volume deleted
|
||||
return
|
||||
}
|
||||
|
||||
// Store the new volume version in the cache and do not process it if this
|
||||
// is an old version.
|
||||
new, err := ctrl.storeVolumeUpdate(newVolume)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
}
|
||||
|
||||
if err := ctrl.syncVolume(newVolume); err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
glog.V(3).Infof("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
|
||||
} else {
|
||||
glog.Errorf("PersistentVolumeController could not update volume %q: %+v", newVolume.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deleteVolume is a callback from framework.Controller watching PersistentVolume
// events.
|
||||
func (ctrl *PersistentVolumeController) deleteVolume(obj interface{}) {
|
||||
_ = ctrl.volumes.store.Delete(obj)
|
||||
|
||||
var volume *api.PersistentVolume
|
||||
var ok bool
|
||||
volume, ok = obj.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
|
||||
volume, ok = unknown.Obj.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", unknown.Obj)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
glog.Errorf("Expected PersistentVolume but deleteVolume received %+v", obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !ok || volume == nil || volume.Spec.ClaimRef == nil {
|
||||
return
|
||||
}
|
||||
|
||||
glog.V(4).Infof("volume %q deleted", volume.Name)
|
||||
|
||||
if claimObj, exists, _ := ctrl.claims.GetByKey(claimrefToClaimKey(volume.Spec.ClaimRef)); exists {
|
||||
if claim, ok := claimObj.(*api.PersistentVolumeClaim); ok && claim != nil {
|
||||
// sync the claim when its volume is deleted. Explicitly syncing the
|
||||
// claim here in response to volume deletion prevents the claim from
|
||||
// waiting until the next sync period for its Lost status.
|
||||
err := ctrl.syncClaim(claim)
|
||||
if err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the
|
||||
// controller recovers from it easily.
|
||||
glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err)
|
||||
} else {
|
||||
glog.Errorf("PersistentVolumeController could not update volume %q from deleteVolume handler: %+v", claimToClaimKey(claim), err)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
glog.Errorf("Cannot convert object from claim cache to claim %q!?: %+v", claimrefToClaimKey(volume.Spec.ClaimRef), claimObj)
|
||||
}
|
||||
}
|
||||
}
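// Illustrative sketch, not part of this change: the add/update/delete
// handlers above are presumably registered with the informer framework when
// the controller is constructed (not shown in this diff), roughly like:
//
//	_, volumeController := framework.NewInformer(
//		volumeSource,
//		&api.PersistentVolume{},
//		syncPeriod,
//		framework.ResourceEventHandlerFuncs{
//			AddFunc:    ctrl.addVolume,
//			UpdateFunc: ctrl.updateVolume,
//			DeleteFunc: ctrl.deleteVolume,
//		},
//	)
//
// volumeSource and syncPeriod are assumed constructor parameters here.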
|
||||
|
||||
// addClaim is a callback from framework.Controller watching PersistentVolumeClaim
// events.
|
||||
func (ctrl *PersistentVolumeController) addClaim(obj interface{}) {
|
||||
// Store the new claim version in the cache and do not process it if this is
|
||||
// an old version.
|
||||
claim, ok := obj.(*api.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
glog.Errorf("Expected PersistentVolumeClaim but addClaim received %+v", obj)
|
||||
return
|
||||
}
|
||||
|
||||
new, err := ctrl.storeClaimUpdate(claim)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
}
|
||||
|
||||
if err := ctrl.syncClaim(claim); err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
glog.V(3).Infof("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err)
|
||||
} else {
|
||||
glog.Errorf("PersistentVolumeController could not add claim %q: %+v", claimToClaimKey(claim), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateClaim is a callback from framework.Controller watching PersistentVolumeClaim
// events.
|
||||
func (ctrl *PersistentVolumeController) updateClaim(oldObj, newObj interface{}) {
|
||||
// Store the new claim version in the cache and do not process it if this is
|
||||
// an old version.
|
||||
newClaim, ok := newObj.(*api.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
glog.Errorf("Expected PersistentVolumeClaim but updateClaim received %+v", newObj)
|
||||
return
|
||||
}
|
||||
|
||||
new, err := ctrl.storeClaimUpdate(newClaim)
|
||||
if err != nil {
|
||||
glog.Errorf("%v", err)
|
||||
}
|
||||
if !new {
|
||||
return
|
||||
}
|
||||
|
||||
if err := ctrl.syncClaim(newClaim); err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the controller
|
||||
// recovers from it easily.
|
||||
glog.V(3).Infof("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
|
||||
} else {
|
||||
glog.Errorf("PersistentVolumeController could not update claim %q: %+v", claimToClaimKey(newClaim), err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// deleteClaim is a callback from framework.Controller watching PersistentVolumeClaim
// events.
|
||||
func (ctrl *PersistentVolumeController) deleteClaim(obj interface{}) {
|
||||
_ = ctrl.claims.Delete(obj)
|
||||
|
||||
var volume *api.PersistentVolume
|
||||
var claim *api.PersistentVolumeClaim
|
||||
var ok bool
|
||||
|
||||
claim, ok = obj.(*api.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
|
||||
claim, ok = unknown.Obj.(*api.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", unknown.Obj)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
glog.Errorf("Expected PersistentVolumeClaim but deleteClaim received %+v", obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if !ok || claim == nil {
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("claim %q deleted", claimToClaimKey(claim))
|
||||
|
||||
if pvObj, exists, _ := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName); exists {
|
||||
if volume, ok = pvObj.(*api.PersistentVolume); ok {
|
||||
// sync the volume when its claim is deleted. Explicitly syncing the
// volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release.
|
||||
if volume != nil {
|
||||
err := ctrl.syncVolume(volume)
|
||||
if err != nil {
|
||||
if errors.IsConflict(err) {
|
||||
// Version conflict error happens quite often and the
|
||||
// controller recovers from it easily.
|
||||
glog.V(3).Infof("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err)
|
||||
} else {
|
||||
glog.Errorf("PersistentVolumeController could not update volume %q from deleteClaim handler: %+v", volume.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
glog.Errorf("Cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, pvObj)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run starts all of this controller's control loops
|
||||
func (ctrl *PersistentVolumeController) Run() {
|
||||
glog.V(4).Infof("starting PersistentVolumeController")
|
||||
|
||||
ctrl.initializeCaches(ctrl.volumeSource, ctrl.claimSource)
|
||||
|
||||
if ctrl.volumeControllerStopCh == nil {
|
||||
ctrl.volumeControllerStopCh = make(chan struct{})
|
||||
go ctrl.volumeController.Run(ctrl.volumeControllerStopCh)
|
||||
}
|
||||
|
||||
if ctrl.claimControllerStopCh == nil {
|
||||
ctrl.claimControllerStopCh = make(chan struct{})
|
||||
go ctrl.claimController.Run(ctrl.claimControllerStopCh)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down this controller
|
||||
func (ctrl *PersistentVolumeController) Stop() {
|
||||
glog.V(4).Infof("stopping PersistentVolumeController")
|
||||
close(ctrl.volumeControllerStopCh)
|
||||
close(ctrl.claimControllerStopCh)
|
||||
}
|
||||
|
||||
const (
|
||||
// this pair of constants is used by the provisioner in Kubernetes 1.2.
|
||||
pvProvisioningRequiredAnnotationKey = "volume.experimental.kubernetes.io/provisioning-required"
|
||||
pvProvisioningCompletedAnnotationValue = "volume.experimental.kubernetes.io/provisioning-completed"
|
||||
)
|
||||
|
||||
// upgradeVolumeFrom1_2 updates a PV from Kubernetes 1.2 to 1.3 and newer. In
// 1.2, we used template PersistentVolume instances for dynamic provisioning.
// In 1.3 and later, these template (and not yet provisioned) instances must be
// removed to make the controller provision a new PV.
// It returns true if the volume was deleted.
// TODO: remove this function when upgrade from 1.2 becomes unsupported.
|
||||
func (ctrl *PersistentVolumeController) upgradeVolumeFrom1_2(volume *api.PersistentVolume) bool {
|
||||
annValue, found := volume.Annotations[pvProvisioningRequiredAnnotationKey]
|
||||
if !found {
|
||||
// The volume is not a template
|
||||
return false
|
||||
}
|
||||
if annValue == pvProvisioningCompletedAnnotationValue {
|
||||
// The volume is already fully provisioned. The new controller will
// ignore this annotation and will obey its ReclaimPolicy, which is
// likely to delete the volume when the appropriate claim is deleted.
|
||||
return false
|
||||
}
|
||||
glog.V(2).Infof("deleting unprovisioned template volume %q from Kubernetes 1.2.", volume.Name)
|
||||
err := ctrl.kubeClient.Core().PersistentVolumes().Delete(volume.Name, nil)
|
||||
if err != nil {
|
||||
glog.Errorf("cannot delete unprovisioned template volume %q: %v", volume.Name, err)
|
||||
}
|
||||
// Remove from local cache
|
||||
err = ctrl.volumes.store.Delete(volume)
|
||||
if err != nil {
|
||||
glog.Errorf("cannot remove volume %q from local cache: %v", volume.Name, err)
|
||||
}
|
||||
|
||||
return true
|
||||
}
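// Illustrative examples, not part of this change, based on the constants and
// logic above: a 1.2 template PV carries
//
//	volume.experimental.kubernetes.io/provisioning-required: "yes"
//
// (any value other than the "provisioning-completed" marker) and is deleted
// here, while a PV whose annotation value equals
// pvProvisioningCompletedAnnotationValue is kept and handled by its
// ReclaimPolicy.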
|
||||
|
||||
// Stateless functions
|
||||
|
||||
func hasAnnotation(obj api.ObjectMeta, ann string) bool {
|
||||
_, found := obj.Annotations[ann]
|
||||
return found
|
||||
}
|
||||
|
||||
func setAnnotation(obj *api.ObjectMeta, ann string, value string) {
|
||||
if obj.Annotations == nil {
|
||||
obj.Annotations = make(map[string]string)
|
||||
}
|
||||
obj.Annotations[ann] = value
|
||||
}
|
||||
|
||||
func getClaimStatusForLogging(claim *api.PersistentVolumeClaim) string {
|
||||
bound := hasAnnotation(claim.ObjectMeta, annBindCompleted)
|
||||
boundByController := hasAnnotation(claim.ObjectMeta, annBoundByController)
|
||||
|
||||
return fmt.Sprintf("phase: %s, bound to: %q, bindCompleted: %v, boundByController: %v", claim.Status.Phase, claim.Spec.VolumeName, bound, boundByController)
|
||||
}
|
||||
|
||||
func getVolumeStatusForLogging(volume *api.PersistentVolume) string {
|
||||
boundByController := hasAnnotation(volume.ObjectMeta, annBoundByController)
|
||||
claimName := ""
|
||||
if volume.Spec.ClaimRef != nil {
|
||||
claimName = fmt.Sprintf("%s/%s (uid: %s)", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name, volume.Spec.ClaimRef.UID)
|
||||
}
|
||||
return fmt.Sprintf("phase: %s, bound to: %q, boundByController: %v", volume.Status.Phase, claimName, boundByController)
|
||||
}
|
||||
|
||||
// isVolumeBoundToClaim returns true if the given volume is pre-bound or bound
// to the specified claim. Both claim.Name and claim.Namespace must be equal.
// If claim.UID is present in volume.Spec.ClaimRef, it must be equal too.
|
||||
func isVolumeBoundToClaim(volume *api.PersistentVolume, claim *api.PersistentVolumeClaim) bool {
|
||||
if volume.Spec.ClaimRef == nil {
|
||||
return false
|
||||
}
|
||||
if claim.Name != volume.Spec.ClaimRef.Name || claim.Namespace != volume.Spec.ClaimRef.Namespace {
|
||||
return false
|
||||
}
|
||||
if volume.Spec.ClaimRef.UID != "" && claim.UID != volume.Spec.ClaimRef.UID {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
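// Illustrative example, not part of this change: a volume pre-bound by the
// user has a ClaimRef with an empty UID, so it matches the claim by namespace
// and name only; once the controller binds it, the UID must match as well.
//
//	volume.Spec.ClaimRef = &api.ObjectReference{
//		Namespace: claim.Namespace,
//		Name:      claim.Name,
//		// UID intentionally left empty for pre-binding
//	}
//	isVolumeBoundToClaim(volume, claim) // true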
|
||||
|
||||
// storeObjectUpdate updates the given cache with a new object version from an
// Informer callback (i.e. with events from etcd) or with an object modified by
// the controller itself. It returns true if the cache was updated and false if
// the object is an old version that should be ignored.
|
||||
func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) {
|
||||
objAccessor, err := meta.Accessor(obj)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error reading cache of %s: %v", className, err)
|
||||
}
|
||||
objName := objAccessor.GetNamespace() + "/" + objAccessor.GetName()
|
||||
|
||||
oldObj, found, err := store.Get(obj)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error finding %s %q in controller cache: %v", className, objName, err)
|
||||
}
|
||||
|
||||
if !found {
|
||||
// This is a new object
|
||||
glog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
if err = store.Add(obj); err != nil {
|
||||
return false, fmt.Errorf("Error adding %s %q to controller cache: %v", className, objName, err)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
oldObjAccessor, err := meta.Accessor(oldObj)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
objResourceVersion, err := strconv.ParseInt(objAccessor.GetResourceVersion(), 10, 64)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error parsing ResourceVersion %q of %s %q: %s", objAccessor.GetResourceVersion(), className, objName, err)
|
||||
}
|
||||
oldObjResourceVersion, err := strconv.ParseInt(oldObjAccessor.GetResourceVersion(), 10, 64)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("Error parsing old ResourceVersion %q of %s %q: %s", oldObjAccessor.GetResourceVersion(), className, objName, err)
|
||||
}
|
||||
|
||||
// Throw away only older version, let the same version pass - we do want to
|
||||
// get periodic sync events.
|
||||
if oldObjResourceVersion > objResourceVersion {
|
||||
glog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
return false, nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
|
||||
if err = store.Update(obj); err != nil {
|
||||
return false, fmt.Errorf("Error updating %s %q in controller cache: %v", className, objName, err)
|
||||
}
|
||||
return true, nil
|
||||
}
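// Illustrative sketch, not part of this change, mirroring TestControllerCache
// in controller_test.go below: only an equal or newer ResourceVersion
// overwrites the cached object.
//
//	c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
//	pv := newVolume("pv", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimDelete)
//	pv.ResourceVersion = "2"
//	storeObjectUpdate(c, pv, "volume") // true - added
//	older := newVolume("pv", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimDelete)
//	older.ResourceVersion = "1"
//	storeObjectUpdate(c, older, "volume") // false - older version ignored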
|
289
pkg/controller/volume/persistentvolume/controller_test.go
Normal file
@@ -0,0 +1,289 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
)
|
||||
|
||||
// Test the real controller methods (add/update/delete claim/volume) with
|
||||
// a fake API server.
|
||||
// There is no controller API to 'initiate syncAll now', therefore these tests
|
||||
// can't reliably simulate periodic sync of volumes/claims - it would be
|
||||
// either very timing-sensitive or slow to wait for real periodic sync.
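// The positional fields of each controllerTest entry below (the struct is
// defined in framework_test.go, whose diff is suppressed; the layout here is
// inferred from usage) are: test name, initial volumes, expected volumes,
// initial claims, expected claims, expected events, injected errors, and the
// test function to run.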
|
||||
func TestControllerSync(t *testing.T) {
|
||||
expectedChanges := []int{4, 1, 1, 2, 1, 1, 1}
|
||||
tests := []controllerTest{
|
||||
// [Unit test set 5] - controller tests.
// We test the controller as if
// it were connected to a real API server, i.e. we call the add/update/delete
// Claim/Volume methods. Also, all changes to volumes and claims are
// sent to add/update/delete Claim/Volume as a real controller would do.
|
||||
{
|
||||
// addClaim gets a new claim. Check it's bound to a volume.
|
||||
"5-2 - complete bind",
|
||||
newVolumeArray("volume5-2", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume5-2", "10Gi", "uid5-2", "claim5-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
noclaims, /* added in testAddClaim5_2 */
|
||||
newClaimArray("claim5-2", "uid5-2", "1Gi", "volume5-2", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors,
|
||||
// Custom test function that generates an add event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
claim := newClaim("claim5-2", "uid5-2", "1Gi", "", api.ClaimPending)
|
||||
reactor.addClaimEvent(claim)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// deleteClaim with a bound claim makes bound volume released.
|
||||
"5-3 - delete claim",
|
||||
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume5-3", "10Gi", "uid5-3", "claim5-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim5-3", "uid5-3", "1Gi", "volume5-3", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noclaims,
|
||||
noevents, noerrors,
|
||||
// Custom test function that generates a delete event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
obj := ctrl.claims.List()[0]
|
||||
claim := obj.(*api.PersistentVolumeClaim)
|
||||
reactor.deleteClaimEvent(claim)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// deleteVolume with a bound volume. Check the claim is Lost.
|
||||
"5-4 - delete volume",
|
||||
newVolumeArray("volume5-4", "10Gi", "uid5-4", "claim5-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
novolumes,
|
||||
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim5-4", "uid5-4", "1Gi", "volume5-4", api.ClaimLost, annBoundByController, annBindCompleted),
|
||||
[]string{"Warning ClaimLost"}, noerrors,
|
||||
// Custom test function that generates a delete event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
obj := ctrl.volumes.store.List()[0]
|
||||
volume := obj.(*api.PersistentVolume)
|
||||
reactor.deleteVolumeEvent(volume)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// addVolume with provisioned volume from Kubernetes 1.2. No "action"
|
||||
// is expected - it should stay bound.
|
||||
"5-5 - add bound volume from 1.2",
|
||||
novolumes,
|
||||
[]*api.PersistentVolume{addVolumeAnnotation(newVolume("volume5-5", "10Gi", "uid5-5", "claim5-5", api.VolumeBound, api.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
|
||||
newClaimArray("claim5-5", "uid5-5", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim5-5", "uid5-5", "1Gi", "volume5-5", api.ClaimBound, annBindCompleted, annBoundByController),
|
||||
noevents, noerrors,
|
||||
// Custom test function that generates an add event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
volume := newVolume("volume5-5", "10Gi", "uid5-5", "claim5-5", api.VolumeBound, api.PersistentVolumeReclaimDelete)
|
||||
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
|
||||
reactor.addVolumeEvent(volume)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// updateVolume with provisioned volume from Kubernetes 1.2. No
|
||||
// "action" is expected - it should stay bound.
|
||||
"5-6 - update bound volume from 1.2",
|
||||
[]*api.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
|
||||
[]*api.PersistentVolume{addVolumeAnnotation(newVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)},
|
||||
newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", api.ClaimBound),
|
||||
newClaimArray("claim5-6", "uid5-6", "1Gi", "volume5-6", api.ClaimBound, annBindCompleted),
|
||||
noevents, noerrors,
|
||||
// Custom test function that generates an update event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
volume := newVolume("volume5-6", "10Gi", "uid5-6", "claim5-6", api.VolumeBound, api.PersistentVolumeReclaimDelete)
|
||||
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue)
|
||||
reactor.modifyVolumeEvent(volume)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// addVolume with unprovisioned volume from Kubernetes 1.2. The
|
||||
// volume should be deleted.
|
||||
"5-7 - add unprovisioned volume from 1.2",
|
||||
novolumes,
|
||||
novolumes,
|
||||
newClaimArray("claim5-7", "uid5-7", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim5-7", "uid5-7", "1Gi", "", api.ClaimPending),
|
||||
noevents, noerrors,
|
||||
// Custom test function that generates an add event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
volume := newVolume("volume5-7", "10Gi", "uid5-7", "claim5-7", api.VolumeBound, api.PersistentVolumeReclaimDelete)
|
||||
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
|
||||
reactor.addVolumeEvent(volume)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
{
|
||||
// updateVolume with unprovisioned volume from Kubernetes 1.2. The
|
||||
// volume should be deleted.
|
||||
"5-8 - update bound volume from 1.2",
|
||||
novolumes,
|
||||
novolumes,
|
||||
newClaimArray("claim5-8", "uid5-8", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim5-8", "uid5-8", "1Gi", "", api.ClaimPending),
|
||||
noevents, noerrors,
|
||||
// Custom test function that generates an update event
|
||||
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
|
||||
volume := newVolume("volume5-8", "10Gi", "uid5-8", "claim5-8", api.VolumeBound, api.PersistentVolumeReclaimDelete)
|
||||
volume = addVolumeAnnotation(volume, pvProvisioningRequiredAnnotationKey, "yes")
|
||||
reactor.modifyVolumeEvent(volume)
|
||||
return nil
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for ix, test := range tests {
|
||||
glog.V(4).Infof("starting test %q", test.name)
|
||||
|
||||
// Initialize the controller
|
||||
client := &fake.Clientset{}
|
||||
volumeSource := framework.NewFakePVControllerSource()
|
||||
claimSource := framework.NewFakePVCControllerSource()
|
||||
ctrl := newTestController(client, volumeSource, claimSource, true)
|
||||
reactor := newVolumeReactor(client, ctrl, volumeSource, claimSource, test.errors)
|
||||
for _, claim := range test.initialClaims {
|
||||
claimSource.Add(claim)
|
||||
reactor.claims[claim.Name] = claim
|
||||
}
|
||||
for _, volume := range test.initialVolumes {
|
||||
volumeSource.Add(volume)
|
||||
reactor.volumes[volume.Name] = volume
|
||||
}
|
||||
|
||||
// Start the controller
|
||||
count := reactor.getChangeCount()
|
||||
go ctrl.Run()
|
||||
|
||||
// Wait for the controller to pass initial sync and fill its caches.
|
||||
for !ctrl.volumeController.HasSynced() ||
|
||||
!ctrl.claimController.HasSynced() ||
|
||||
len(ctrl.claims.ListKeys()) < len(test.initialClaims) ||
|
||||
len(ctrl.volumes.store.ListKeys()) < len(test.initialVolumes) {
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
glog.V(4).Infof("controller synced, starting test")
|
||||
|
||||
// Call the tested function
|
||||
err := test.test(ctrl, reactor, test)
|
||||
if err != nil {
|
||||
t.Errorf("Test %q initial test call failed: %v", test.name, err)
|
||||
}
|
||||
// Simulate a periodic resync, just in case some events arrived in the
// wrong order.
|
||||
ctrl.claims.Resync()
|
||||
ctrl.volumes.store.Resync()
|
||||
|
||||
// Wait at least once, just in case expectedChanges[ix] == 0
|
||||
reactor.waitTest()
|
||||
// Wait for expected number of operations.
|
||||
for reactor.getChangeCount() < count+expectedChanges[ix] {
|
||||
reactor.waitTest()
|
||||
}
|
||||
|
||||
ctrl.Stop()
|
||||
|
||||
evaluateTestResults(ctrl, reactor, test, t)
|
||||
}
|
||||
}
|
||||
|
||||
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
|
||||
pv := newVolume("pvName", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimDelete)
|
||||
pv.ResourceVersion = version
|
||||
ret, err := storeObjectUpdate(c, pv, "volume")
|
||||
if err != nil {
|
||||
t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
|
||||
}
|
||||
if expectedReturn != ret {
|
||||
t.Errorf("%s: expected storeObjectUpdate to return %v, got: %v", prefix, expectedReturn, ret)
|
||||
}
|
||||
|
||||
// find the stored version
|
||||
|
||||
pvObj, found, err := c.GetByKey("pvName")
|
||||
if err != nil {
|
||||
t.Errorf("expected volume 'pvName' in the cache, got error instead: %v", err)
|
||||
}
|
||||
if !found {
|
||||
t.Errorf("expected volume 'pvName' in the cache but it was not found")
|
||||
}
|
||||
pv, ok := pvObj.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
t.Errorf("expected volume in the cache, got different object instead: %+v", pvObj)
|
||||
}
|
||||
|
||||
if ret {
|
||||
if pv.ResourceVersion != version {
|
||||
t.Errorf("expected volume with version %s in the cache, got %s instead", version, pv.ResourceVersion)
|
||||
}
|
||||
} else {
|
||||
if pv.ResourceVersion == version {
|
||||
t.Errorf("expected volume with version other than %s in the cache, got %s instead", version, pv.ResourceVersion)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestControllerCache tests func storeObjectUpdate()
|
||||
func TestControllerCache(t *testing.T) {
|
||||
// Cache under test
|
||||
c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
|
||||
|
||||
// Store new PV
|
||||
storeVersion(t, "Step1", c, "1", true)
|
||||
// Store the same PV
|
||||
storeVersion(t, "Step2", c, "1", true)
|
||||
// Store newer PV
|
||||
storeVersion(t, "Step3", c, "2", true)
|
||||
// Store older PV - simulating old "PV updated" event or periodic sync with
|
||||
// old data
|
||||
storeVersion(t, "Step4", c, "1", false)
|
||||
// Store newer PV - test integer parsing ("2" > "10" as string,
|
||||
// while 2 < 10 as integers)
|
||||
storeVersion(t, "Step5", c, "10", true)
|
||||
}
|
||||
|
||||
func TestControllerCacheParsingError(t *testing.T) {
|
||||
c := cache.NewStore(framework.DeletionHandlingMetaNamespaceKeyFunc)
|
||||
// There must be something in the cache to compare with
|
||||
storeVersion(t, "Step1", c, "1", true)
|
||||
|
||||
pv := newVolume("pvName", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimDelete)
|
||||
pv.ResourceVersion = "xxx"
|
||||
_, err := storeObjectUpdate(c, pv, "volume")
|
||||
if err == nil {
|
||||
t.Errorf("Expected parsing error, got nil instead")
|
||||
}
|
||||
}
|
||||
|
||||
func addVolumeAnnotation(volume *api.PersistentVolume, annName, annValue string) *api.PersistentVolume {
|
||||
if volume.Annotations == nil {
|
||||
volume.Annotations = make(map[string]string)
|
||||
}
|
||||
volume.Annotations[annName] = annValue
|
||||
return volume
|
||||
}
|
169
pkg/controller/volume/persistentvolume/delete_test.go
Normal file
@@ -0,0 +1,169 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
// Test a single call to syncVolume, expecting deletion to happen.
|
||||
// 1. Fill in the controller with initial data
|
||||
// 2. Call the syncVolume *once*.
|
||||
// 3. Compare resulting volumes with expected volumes.
|
||||
func TestDeleteSync(t *testing.T) {
|
||||
tests := []controllerTest{
|
||||
{
|
||||
// delete volume bound by controller
|
||||
"8-1 - successful delete",
|
||||
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
|
||||
novolumes,
|
||||
noclaims,
|
||||
noclaims,
|
||||
noevents, noerrors,
|
||||
// Inject deleter into the controller and call syncVolume. The
|
||||
// deleter simulates one delete() call that succeeds.
|
||||
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
|
||||
},
|
||||
{
|
||||
// delete volume bound by user
|
||||
"8-2 - successful delete with prebound volume",
|
||||
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
novolumes,
|
||||
noclaims,
|
||||
noclaims,
|
||||
noevents, noerrors,
|
||||
// Inject deleter into the controller and call syncVolume. The
|
||||
// deleter simulates one delete() call that succeeds.
|
||||
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
|
||||
},
|
||||
{
|
||||
// delete failure - plugin not found
|
||||
"8-3 - plugin not found",
|
||||
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete)),
|
||||
noclaims,
|
||||
noclaims,
|
||||
[]string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume,
|
||||
},
|
||||
{
|
||||
// delete failure - newDeleter returns error
|
||||
"8-4 - newDeleter returns error",
|
||||
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete)),
|
||||
noclaims,
|
||||
noclaims,
|
||||
[]string{"Warning VolumeFailedDelete"}, noerrors,
|
||||
wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume),
|
||||
},
|
||||
{
|
||||
// delete failure - delete() returns error
|
||||
"8-5 - delete returns error",
|
||||
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
withMessage("Delete of volume \"volume8-5\" failed: Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete)),
|
||||
noclaims,
|
||||
noclaims,
|
||||
[]string{"Warning VolumeFailedDelete"}, noerrors,
|
||||
wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error")}, testSyncVolume),
|
||||
},
|
||||
{
|
||||
// delete success(?) - volume is deleted before doDelete() starts
|
||||
"8-6 - volume is deleted before deleting",
|
||||
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
novolumes,
|
||||
noclaims,
|
||||
noclaims,
|
||||
noevents, noerrors,
|
||||
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
|
||||
// Delete the volume before delete operation starts
|
||||
reactor.lock.Lock()
|
||||
delete(reactor.volumes, "volume8-6")
|
||||
reactor.lock.Unlock()
|
||||
}),
|
||||
},
|
||||
{
|
||||
// delete success(?) - volume is bound just at the time doDelete()
|
||||
// starts. This simulates "volume no longer needs recycling,
|
||||
// skipping".
|
||||
"8-7 - volume is bound before deleting",
|
||||
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
|
||||
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
|
||||
noclaims,
|
||||
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound),
|
||||
noevents, noerrors,
|
||||
wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
|
||||
reactor.lock.Lock()
|
||||
defer reactor.lock.Unlock()
|
||||
// Bind the volume to resurrected claim (this should never
|
||||
// happen)
|
||||
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound)
|
||||
reactor.claims[claim.Name] = claim
|
||||
ctrl.claims.Add(claim)
|
||||
volume := reactor.volumes["volume8-7"]
|
||||
volume.Status.Phase = api.VolumeBound
|
||||
}),
|
||||
},
|
||||
{
|
||||
// delete success - volume bound by user is deleted, while a new
|
||||
// claim is created with another UID.
|
||||
"8-9 - prebound volume is deleted while the claim exists",
|
||||
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
novolumes,
|
||||
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending),
|
||||
noevents, noerrors,
|
||||
// Inject deleter into the controller and call syncVolume. The
|
||||
// deleter simulates one delete() call that succeeds.
|
||||
wrapTestWithControllerConfig(operationDelete, []error{nil}, testSyncVolume),
|
||||
},
|
||||
}
|
||||
runSyncTests(t, tests)
|
||||
}
|
||||
|
||||
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
|
||||
// volume/claims. The test follows this pattern:
|
||||
// 0. Load the controller with initial data.
|
||||
// 1. Call controllerTest.testCall() once as in TestSync()
|
||||
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
|
||||
// call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
|
||||
// events). Go to 2. if these calls change anything.
|
||||
// 3. When all changes are processed and no new changes were made, call
|
||||
// syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
|
||||
// 4. If some changes were done by step 3., go to 2. (simulation of
|
||||
// "volume/claim updated" events, eventually performing step 3. again)
|
||||
// 5. When 3. does not do any changes, finish the tests and compare final set
|
||||
// of volumes/claims with expected claims/volumes and report differences.
|
||||
// A limit on the number of calls is enforced to prevent endless loops.
|
||||
func TestDeleteMultiSync(t *testing.T) {
|
||||
tests := []controllerTest{
|
||||
{
|
||||
// delete failure - delete returns error. The controller should
|
||||
// try again.
|
||||
"9-1 - delete returns error",
|
||||
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete),
|
||||
novolumes,
|
||||
noclaims,
|
||||
noclaims,
|
||||
[]string{"Warning VolumeFailedDelete"}, noerrors,
|
||||
wrapTestWithControllerConfig(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
|
||||
},
|
||||
}
|
||||
|
||||
runMultisyncTests(t, tests)
|
||||
}
|
1112
pkg/controller/volume/persistentvolume/framework_test.go
Normal file
File diff suppressed because it is too large
274
pkg/controller/volume/persistentvolume/index.go
Normal file
@@ -0,0 +1,274 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
)
|
||||
|
||||
// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes
|
||||
// indexed by AccessModes and ordered by storage capacity.
|
||||
type persistentVolumeOrderedIndex struct {
|
||||
store cache.Indexer
|
||||
}
|
||||
|
||||
func newPersistentVolumeOrderedIndex() persistentVolumeOrderedIndex {
|
||||
return persistentVolumeOrderedIndex{cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"accessmodes": accessModesIndexFunc})}
|
||||
}
|
||||
|
||||
// accessModesIndexFunc is an indexing function that returns a persistent
|
||||
// volume's AccessModes as a string
|
||||
func accessModesIndexFunc(obj interface{}) ([]string, error) {
|
||||
if pv, ok := obj.(*api.PersistentVolume); ok {
|
||||
modes := api.GetAccessModesAsString(pv.Spec.AccessModes)
|
||||
return []string{modes}, nil
|
||||
}
|
||||
return []string{""}, fmt.Errorf("object is not a persistent volume: %v", obj)
|
||||
}
|
||||
|
||||
// listByAccessModes returns all volumes with the given set of
|
||||
// AccessModeTypes. The list is unsorted!
|
||||
func (pvIndex *persistentVolumeOrderedIndex) listByAccessModes(modes []api.PersistentVolumeAccessMode) ([]*api.PersistentVolume, error) {
|
||||
pv := &api.PersistentVolume{
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
AccessModes: modes,
|
||||
},
|
||||
}
|
||||
|
||||
objs, err := pvIndex.store.Index("accessmodes", pv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
volumes := make([]*api.PersistentVolume, len(objs))
|
||||
for i, obj := range objs {
|
||||
volumes[i] = obj.(*api.PersistentVolume)
|
||||
}
|
||||
|
||||
return volumes, nil
|
||||
}
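// Illustrative usage, not part of this change, mirroring TestListByAccessModes
// in index_test.go:
//
//	volumes, err := pvIndex.listByAccessModes(
//		[]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany})
//	// volumes now holds every PV indexed under exactly "RWO,ROX", unsorted.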
|
||||
|
||||
// matchPredicate is a function that indicates that a persistent volume matches another
|
||||
type matchPredicate func(compareThis, toThis *api.PersistentVolume) bool
|
||||
|
||||
// findByClaim returns the closest matching PV from the ordered list, or nil if no match is found
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVolumeClaim, matchPredicate matchPredicate) (*api.PersistentVolume, error) {
|
||||
// PVs are indexed by their access modes to allow easier searching. Each
|
||||
// index is the string representation of a set of access modes. There is a
|
||||
// finite number of possible sets and PVs will only be indexed in one of
|
||||
// them (whichever index matches the PV's modes).
|
||||
//
|
||||
// A request for resources will always specify its desired access modes.
|
||||
// Any matching PV must have at least that number of access modes, but it
|
||||
// can have more. For example, a user asks for ReadWriteOnce but a GCEPD
|
||||
// is available, which is ReadWriteOnce+ReadOnlyMany.
|
||||
//
|
||||
// Searches are performed against a set of access modes, so we can attempt
|
||||
// not only the exact matching modes but also potential matches (the GCEPD
|
||||
// example above).
|
||||
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
|
||||
|
||||
var smallestVolume *api.PersistentVolume
|
||||
var smallestVolumeSize int64
|
||||
requestedQty := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
|
||||
requestedSize := requestedQty.Value()
|
||||
|
||||
var selector labels.Selector
|
||||
if claim.Spec.Selector != nil {
|
||||
internalSelector, err := unversioned.LabelSelectorAsSelector(claim.Spec.Selector)
|
||||
if err != nil {
|
||||
// should be unreachable code due to validation
|
||||
return nil, fmt.Errorf("error creating internal label selector for claim: %v: %v", claimToClaimKey(claim), err)
|
||||
}
|
||||
selector = internalSelector
|
||||
}
|
||||
|
||||
for _, modes := range allPossibleModes {
|
||||
volumes, err := pvIndex.listByAccessModes(modes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Go through all available volumes with two goals:
|
||||
// - find a volume that is either pre-bound by user or dynamically
|
||||
// provisioned for this claim. Because of this we need to loop through
|
||||
// all volumes.
|
||||
// - find the smallest matching one if there is no volume pre-bound to
|
||||
// the claim.
|
||||
for _, volume := range volumes {
|
||||
if isVolumeBoundToClaim(volume, claim) {
|
||||
// this claim and volume are bound; return it,
|
||||
// whether the claim is prebound or for volumes
|
||||
// intended for dynamic provisioning v1
|
||||
return volume, nil
|
||||
}
|
||||
|
||||
// filter out:
|
||||
// - volumes bound to another claim
|
||||
// - volumes whose labels don't match the claim's selector, if specified
|
||||
if volume.Spec.ClaimRef != nil {
|
||||
continue
|
||||
} else if selector != nil && !selector.Matches(labels.Set(volume.Labels)) {
|
||||
continue
|
||||
}
|
||||
|
||||
volumeQty := volume.Spec.Capacity[api.ResourceStorage]
|
||||
volumeSize := volumeQty.Value()
|
||||
if volumeSize >= requestedSize {
|
||||
if smallestVolume == nil || smallestVolumeSize > volumeSize {
|
||||
smallestVolume = volume
|
||||
smallestVolumeSize = volumeSize
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We want to provision volumes if the annotation is set even if there
// is a matching PV. Therefore, do not look for an available PV and let
// a new volume be provisioned.
//
// When the provisioner creates a new PV for this claim, an exact match
// pre-bound to the claim will be found by the checks above during a
// subsequent claim sync.
|
||||
if hasAnnotation(claim.ObjectMeta, annClass) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if smallestVolume != nil {
|
||||
// Found a matching volume
|
||||
return smallestVolume, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) {
|
||||
return pvIndex.findByClaim(claim, matchStorageCapacity)
|
||||
}
|
||||
|
||||
// matchStorageCapacity is a matchPredicate used to sort and find volumes
|
||||
func matchStorageCapacity(pvA, pvB *api.PersistentVolume) bool {
|
||||
aQty := pvA.Spec.Capacity[api.ResourceStorage]
|
||||
bQty := pvB.Spec.Capacity[api.ResourceStorage]
|
||||
aSize := aQty.Value()
|
||||
bSize := bQty.Value()
|
||||
return aSize <= bSize
|
||||
}
|
||||
|
||||
// allPossibleMatchingAccessModes returns an array of AccessMode arrays that
|
||||
// can satisfy a user's requested modes.
|
||||
//
|
||||
// see the comments in findByClaim above regarding indexing.
|
||||
//
|
||||
// allPossibleMatchingAccessModes gets all stringified access modes from the
// index and returns all those that contain at least all of the requested
// modes.
|
||||
//
|
||||
// For example, assume the index contains 2 types of PVs where the stringified
|
||||
// accessmodes are:
|
||||
//
|
||||
// "RWO,ROX" -- some number of GCEPDs
|
||||
// "RWO,ROX,RWX" -- some number of NFS volumes
|
||||
//
|
||||
// A request for RWO could be satisfied by both sets of indexed volumes, so
|
||||
// allPossibleMatchingAccessModes returns:
|
||||
//
|
||||
// [][]api.PersistentVolumeAccessMode {
|
||||
// []api.PersistentVolumeAccessMode {
|
||||
// api.ReadWriteOnce, api.ReadOnlyMany,
|
||||
// },
|
||||
// []api.PersistentVolumeAccessMode {
|
||||
// api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// A request for RWX can be satisfied by only one set of indexed volumes, so
|
||||
// the return is:
|
||||
//
|
||||
// [][]api.PersistentVolumeAccessMode {
|
||||
// []api.PersistentVolumeAccessMode {
|
||||
// api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany,
|
||||
// },
|
||||
// }
|
||||
//
|
||||
// This func returns the mode sets in ascending order of size to give the user
// what is closest to what they actually asked for.
|
||||
func (pvIndex *persistentVolumeOrderedIndex) allPossibleMatchingAccessModes(requestedModes []api.PersistentVolumeAccessMode) [][]api.PersistentVolumeAccessMode {
|
||||
matchedModes := [][]api.PersistentVolumeAccessMode{}
|
||||
keys := pvIndex.store.ListIndexFuncValues("accessmodes")
|
||||
for _, key := range keys {
|
||||
indexedModes := api.GetAccessModesFromString(key)
|
||||
if containedInAll(indexedModes, requestedModes) {
|
||||
matchedModes = append(matchedModes, indexedModes)
|
||||
}
|
||||
}
|
||||
|
||||
// sort by the number of modes in each array with the fewest number of
|
||||
// modes coming first. this allows searching for volumes by the minimum
|
||||
// number of modes required of the possible matches.
|
||||
sort.Sort(byAccessModes{matchedModes})
|
||||
return matchedModes
|
||||
}
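// Illustrative usage, not part of this change: with the two index entries from
// the example in the comment above, a request for ReadWriteOnce returns both
// mode sets, with the smaller "RWO,ROX" set first.
//
//	modes := pvIndex.allPossibleMatchingAccessModes(
//		[]api.PersistentVolumeAccessMode{api.ReadWriteOnce})
//	// modes[0] -> [ReadWriteOnce, ReadOnlyMany]
//	// modes[1] -> [ReadWriteOnce, ReadOnlyMany, ReadWriteMany]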
|
||||
|
||||
func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
|
||||
for _, m := range modes {
|
||||
if m == mode {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func containedInAll(indexedModes []api.PersistentVolumeAccessMode, requestedModes []api.PersistentVolumeAccessMode) bool {
|
||||
for _, mode := range requestedModes {
|
||||
if !contains(indexedModes, mode) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// byAccessModes is used to order access modes by size, with the fewest modes first
|
||||
type byAccessModes struct {
|
||||
modes [][]api.PersistentVolumeAccessMode
|
||||
}
|
||||
|
||||
func (c byAccessModes) Less(i, j int) bool {
|
||||
return len(c.modes[i]) < len(c.modes[j])
|
||||
}
|
||||
|
||||
func (c byAccessModes) Swap(i, j int) {
|
||||
c.modes[i], c.modes[j] = c.modes[j], c.modes[i]
|
||||
}
|
||||
|
||||
func (c byAccessModes) Len() int {
|
||||
return len(c.modes)
|
||||
}
|
||||
|
||||
func claimToClaimKey(claim *api.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
|
||||
}
|
||||
|
||||
func claimrefToClaimKey(claimref *api.ObjectReference) string {
|
||||
return fmt.Sprintf("%s/%s", claimref.Namespace, claimref.Name)
|
||||
}
|
656
pkg/controller/volume/persistentvolume/index_test.go
Normal file
@@ -0,0 +1,656 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
)
|
||||
|
||||
func TestMatchVolume(t *testing.T) {
|
||||
volList := newPersistentVolumeOrderedIndex()
|
||||
for _, pv := range createTestVolumes() {
|
||||
volList.store.Add(pv)
|
||||
}
|
||||
|
||||
scenarios := map[string]struct {
|
||||
expectedMatch string
|
||||
claim *api.PersistentVolumeClaim
|
||||
}{
|
||||
"successful-match-gce-10": {
|
||||
expectedMatch: "gce-pd-10",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("8G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"successful-match-nfs-5": {
|
||||
expectedMatch: "nfs-5",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce, api.ReadWriteMany},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("5G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"successful-skip-1g-bound-volume": {
|
||||
expectedMatch: "gce-pd-5",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"successful-no-match": {
|
||||
expectedMatch: "",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("999G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"successful-no-match-due-to-label": {
|
||||
expectedMatch: "",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
Selector: &unversioned.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"should-not-exist": "true",
|
||||
},
|
||||
},
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("999G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"successful-no-match-due-to-size-constraint-with-label-selector": {
|
||||
expectedMatch: "",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
Selector: &unversioned.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"should-exist": "true",
|
||||
},
|
||||
},
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("20000G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"successful-match-due-with-constraint-and-label-selector": {
|
||||
expectedMatch: "gce-pd-2",
|
||||
claim: &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
Selector: &unversioned.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"should-exist": "true",
|
||||
},
|
||||
},
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
volume, err := volList.findBestMatchForClaim(scenario.claim)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error matching volume by claim: %v", err)
|
||||
}
|
||||
if len(scenario.expectedMatch) != 0 && volume == nil {
|
||||
t.Errorf("Expected match but received nil volume for scenario: %s", name)
|
||||
}
|
||||
if len(scenario.expectedMatch) != 0 && volume != nil && string(volume.UID) != scenario.expectedMatch {
|
||||
t.Errorf("Expected %s but got volume %s in scenario %s", scenario.expectedMatch, volume.UID, name)
|
||||
}
|
||||
if len(scenario.expectedMatch) == 0 && volume != nil {
|
||||
t.Errorf("Unexpected match for scenario: %s", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMatchingWithBoundVolumes(t *testing.T) {
|
||||
volumeIndex := newPersistentVolumeOrderedIndex()
|
||||
// two similar volumes, one is bound
|
||||
pv1 := &api.PersistentVolume{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "gce-pd-1",
|
||||
Name: "gce001",
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
|
||||
},
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
|
||||
},
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany},
|
||||
// this one we're pretending is already bound
|
||||
ClaimRef: &api.ObjectReference{UID: "abc123"},
|
||||
},
|
||||
}
|
||||
|
||||
pv2 := &api.PersistentVolume{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
UID: "gce-pd-2",
|
||||
Name: "gce002",
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
|
||||
},
|
||||
PersistentVolumeSource: api.PersistentVolumeSource{
|
||||
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
|
||||
},
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany},
|
||||
},
|
||||
}
|
||||
|
||||
volumeIndex.store.Add(pv1)
|
||||
volumeIndex.store.Add(pv2)
|
||||
|
||||
claim := &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "claim01",
|
||||
Namespace: "myns",
|
||||
},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
AccessModes: []api.PersistentVolumeAccessMode{api.ReadOnlyMany, api.ReadWriteOnce},
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
volume, err := volumeIndex.findBestMatchForClaim(claim)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error matching volume by claim: %v", err)
|
||||
}
|
||||
if volume == nil {
|
||||
t.Fatalf("Unexpected nil volume. Expected %s", pv2.Name)
|
||||
}
|
||||
if pv2.Name != volume.Name {
|
||||
t.Errorf("Expected %s but got volume %s instead", pv2.Name, volume.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestListByAccessModes(t *testing.T) {
	volList := newPersistentVolumeOrderedIndex()
	for _, pv := range createTestVolumes() {
		volList.store.Add(pv)
	}

	volumes, err := volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany})
	if err != nil {
		t.Error("Unexpected error retrieving volumes by access modes:", err)
	}
	sort.Sort(byCapacity{volumes})

	for i, expected := range []string{"gce-pd-1", "gce-pd-5", "gce-pd-10"} {
		if string(volumes[i].UID) != expected {
			t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
		}
	}

	volumes, err = volList.listByAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany, api.ReadWriteMany})
	if err != nil {
		t.Error("Unexpected error retrieving volumes by access modes:", err)
	}
	sort.Sort(byCapacity{volumes})

	for i, expected := range []string{"nfs-1", "nfs-5", "nfs-10"} {
		if string(volumes[i].UID) != expected {
			t.Errorf("Incorrect ordering of persistent volumes. Expected %s but got %s", expected, volumes[i].UID)
		}
	}
}

func TestAllPossibleAccessModes(t *testing.T) {
	index := newPersistentVolumeOrderedIndex()
	for _, pv := range createTestVolumes() {
		index.store.Add(pv)
	}

	// the mock PVs created above contain three sets of access modes: RWO, RWO+ROX and RWO+ROX+RWX
	possibleModes := index.allPossibleMatchingAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteOnce})
	if len(possibleModes) != 3 {
		t.Errorf("Expected 3 arrays of modes that match RWO, but got %v", len(possibleModes))
	}
	for _, m := range possibleModes {
		if !contains(m, api.ReadWriteOnce) {
			t.Errorf("AccessModes does not contain %s", api.ReadWriteOnce)
		}
	}

	possibleModes = index.allPossibleMatchingAccessModes([]api.PersistentVolumeAccessMode{api.ReadWriteMany})
	if len(possibleModes) != 1 {
		t.Errorf("Expected 1 array of modes that match RWX, but got %v", len(possibleModes))
	}
	if !contains(possibleModes[0], api.ReadWriteMany) {
		t.Errorf("AccessModes does not contain %s", api.ReadWriteMany)
	}
}

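// Note: contains is a small unexported helper used by the assertions above; its
// definition is not part of this hunk. As a rough sketch only (an assumption,
// not the actual implementation), such a membership check presumably looks like:
//
//	func contains(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool {
//		for _, m := range modes {
//			if m == mode {
//				return true
//			}
//		}
//		return false
//	}
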
func TestFindingVolumeWithDifferentAccessModes(t *testing.T) {
	gce := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{UID: "001", Name: "gce"},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: api.PersistentVolumeSource{GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}},
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
				api.ReadOnlyMany,
			},
		},
	}

	ebs := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{UID: "002", Name: "ebs"},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: api.PersistentVolumeSource{AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{}},
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
			},
		},
	}

	nfs := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{UID: "003", Name: "nfs"},
		Spec: api.PersistentVolumeSpec{
			Capacity: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: api.PersistentVolumeSource{NFS: &api.NFSVolumeSource{}},
			AccessModes: []api.PersistentVolumeAccessMode{
				api.ReadWriteOnce,
				api.ReadOnlyMany,
				api.ReadWriteMany,
			},
		},
	}

	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claim01",
			Namespace: "myns",
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1G")}},
		},
	}

	index := newPersistentVolumeOrderedIndex()
	index.store.Add(gce)
	index.store.Add(ebs)
	index.store.Add(nfs)

	volume, _ := index.findBestMatchForClaim(claim)
	if volume.Name != ebs.Name {
		t.Errorf("Expected %s but got volume %s instead", ebs.Name, volume.Name)
	}

	claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce, api.ReadOnlyMany}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != gce.Name {
		t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name)
	}

	// order of the requested modes should not matter
	claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteMany, api.ReadWriteOnce, api.ReadOnlyMany}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != nfs.Name {
		t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name)
	}

	// fewer modes requested should still match
	claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteMany}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != nfs.Name {
		t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name)
	}

	// pretend the exact match is bound. should get the next level up of modes.
	ebs.Spec.ClaimRef = &api.ObjectReference{}
	claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != gce.Name {
		t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name)
	}

	// continue up the levels of modes.
	gce.Spec.ClaimRef = &api.ObjectReference{}
	claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != nfs.Name {
		t.Errorf("Expected %s but got volume %s instead", nfs.Name, volume.Name)
	}

	// partial mode request
	gce.Spec.ClaimRef = nil
	claim.Spec.AccessModes = []api.PersistentVolumeAccessMode{api.ReadOnlyMany}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != gce.Name {
		t.Errorf("Expected %s but got volume %s instead", gce.Name, volume.Name)
	}
}

func createTestVolumes() []*api.PersistentVolume {
	// these volumes are deliberately out-of-order to test indexing and sorting
	return []*api.PersistentVolume{
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "gce-pd-10",
				Name: "gce003",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
				},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "gce-pd-20",
				Name: "gce004",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("20G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
				},
				// this one we're pretending is already bound
				ClaimRef: &api.ObjectReference{UID: "def456"},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "nfs-5",
				Name: "nfs002",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("5G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					Glusterfs: &api.GlusterfsVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
					api.ReadWriteMany,
				},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "gce-pd-1",
				Name: "gce001",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
				},
				// this one we're pretending is already bound
				ClaimRef: &api.ObjectReference{UID: "abc123"},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "nfs-10",
				Name: "nfs003",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("10G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					Glusterfs: &api.GlusterfsVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
					api.ReadWriteMany,
				},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "gce-pd-5",
				Name: "gce002",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("5G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
				},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "nfs-1",
				Name: "nfs001",
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					Glusterfs: &api.GlusterfsVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
					api.ReadOnlyMany,
					api.ReadWriteMany,
				},
			},
		},
		{
			ObjectMeta: api.ObjectMeta{
				UID:  "gce-pd-2",
				Name: "gce0022",
				Labels: map[string]string{
					"should-exist": "true",
				},
			},
			Spec: api.PersistentVolumeSpec{
				Capacity: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("10000G"),
				},
				PersistentVolumeSource: api.PersistentVolumeSource{
					GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{},
				},
				AccessModes: []api.PersistentVolumeAccessMode{
					api.ReadWriteOnce,
				},
			},
		},
	}
}

func testVolume(name, size string) *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{},
		},
		Spec: api.PersistentVolumeSpec{
			Capacity:               api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse(size)},
			PersistentVolumeSource: api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{}},
			AccessModes:            []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}
}

func TestFindingPreboundVolumes(t *testing.T) {
	claim := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Name:      "claim01",
			Namespace: "myns",
			SelfLink:  testapi.Default.SelfLink("pvc", ""),
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi")}},
		},
	}
	claimRef, err := api.GetReference(claim)
	if err != nil {
		t.Errorf("error getting claimRef: %v", err)
	}

	pv1 := testVolume("pv1", "1Gi")
	pv5 := testVolume("pv5", "5Gi")
	pv8 := testVolume("pv8", "8Gi")

	index := newPersistentVolumeOrderedIndex()
	index.store.Add(pv1)
	index.store.Add(pv5)
	index.store.Add(pv8)

	// expected exact match on size
	volume, _ := index.findBestMatchForClaim(claim)
	if volume.Name != pv1.Name {
		t.Errorf("Expected %s but got volume %s instead", pv1.Name, volume.Name)
	}

	// pretend the exact match is pre-bound. should get the next size up.
	pv1.Spec.ClaimRef = &api.ObjectReference{Name: "foo", Namespace: "bar"}
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv5.Name {
		t.Errorf("Expected %s but got volume %s instead", pv5.Name, volume.Name)
	}

	// pretend the exact match is available but the largest volume is pre-bound to the claim.
	pv1.Spec.ClaimRef = nil
	pv8.Spec.ClaimRef = claimRef
	volume, _ = index.findBestMatchForClaim(claim)
	if volume.Name != pv8.Name {
		t.Errorf("Expected %s but got volume %s instead", pv8.Name, volume.Name)
	}
}

// byCapacity is used to order volumes by ascending storage size
type byCapacity struct {
	volumes []*api.PersistentVolume
}

func (c byCapacity) Less(i, j int) bool {
	return matchStorageCapacity(c.volumes[i], c.volumes[j])
}

func (c byCapacity) Swap(i, j int) {
	c.volumes[i], c.volumes[j] = c.volumes[j], c.volumes[i]
}

func (c byCapacity) Len() int {
	return len(c.volumes)
}
91
pkg/controller/volume/persistentvolume/options/options.go
Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
	"time"

	"github.com/spf13/pflag"
)

// VolumeConfigFlags is used to bind CLI flags to variables. This top-level struct contains *all* enumerated
// CLI flags meant to configure all volume plugins. From this config, the binary will create many instances
// of volume.VolumeConfig which are then passed to the appropriate plugin. The ControllerManager binary is the only
// part of the code which knows what plugins are supported and which CLI flags correspond to each plugin.
type VolumeConfigFlags struct {
	PersistentVolumeRecyclerMaximumRetry                int
	PersistentVolumeRecyclerMinimumTimeoutNFS           int
	PersistentVolumeRecyclerPodTemplateFilePathNFS      string
	PersistentVolumeRecyclerIncrementTimeoutNFS         int
	PersistentVolumeRecyclerPodTemplateFilePathHostPath string
	PersistentVolumeRecyclerMinimumTimeoutHostPath      int
	PersistentVolumeRecyclerIncrementTimeoutHostPath    int
	EnableHostPathProvisioning                          bool
	EnableDynamicProvisioning                           bool
}

type PersistentVolumeControllerOptions struct {
	PVClaimBinderSyncPeriod time.Duration
	VolumeConfigFlags       VolumeConfigFlags
}

func NewPersistentVolumeControllerOptions() PersistentVolumeControllerOptions {
	return PersistentVolumeControllerOptions{
		PVClaimBinderSyncPeriod: 15 * time.Second,
		VolumeConfigFlags: VolumeConfigFlags{
			// default values here
			PersistentVolumeRecyclerMaximumRetry:             3,
			PersistentVolumeRecyclerMinimumTimeoutNFS:        300,
			PersistentVolumeRecyclerIncrementTimeoutNFS:      30,
			PersistentVolumeRecyclerMinimumTimeoutHostPath:   60,
			PersistentVolumeRecyclerIncrementTimeoutHostPath: 30,
			EnableHostPathProvisioning:                       false,
			EnableDynamicProvisioning:                        true,
		},
	}
}

func (o *PersistentVolumeControllerOptions) AddFlags(fs *pflag.FlagSet) {
	fs.DurationVar(&o.PVClaimBinderSyncPeriod, "pvclaimbinder-sync-period", o.PVClaimBinderSyncPeriod,
		"The period for syncing persistent volumes and persistent volume claims")
	fs.StringVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS,
		"pv-recycler-pod-template-filepath-nfs", o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathNFS,
		"The file path to a pod definition used as a template for NFS persistent volume recycling")
	fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs",
		o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
	fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs",
		o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
	fs.StringVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath",
		o.VolumeConfigFlags.PersistentVolumeRecyclerPodTemplateFilePathHostPath,
		"The file path to a pod definition used as a template for HostPath persistent volume recycling. "+
			"This is for development and testing only and will not work in a multi-node cluster.")
	fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath",
		o.VolumeConfigFlags.PersistentVolumeRecyclerMinimumTimeoutHostPath,
		"The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
	fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath",
		o.VolumeConfigFlags.PersistentVolumeRecyclerIncrementTimeoutHostPath,
		"the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. "+
			"This is for development and testing only and will not work in a multi-node cluster.")
	fs.IntVar(&o.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry, "pv-recycler-maximum-retry",
		o.VolumeConfigFlags.PersistentVolumeRecyclerMaximumRetry,
		"Maximum number of attempts to recycle or delete a persistent volume")
	fs.BoolVar(&o.VolumeConfigFlags.EnableHostPathProvisioning, "enable-hostpath-provisioner", o.VolumeConfigFlags.EnableHostPathProvisioning,
		"Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. "+
			"HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
	fs.BoolVar(&o.VolumeConfigFlags.EnableDynamicProvisioning, "enable-dynamic-provisioning", o.VolumeConfigFlags.EnableDynamicProvisioning,
		"Enable dynamic provisioning for environments that support it.")
}
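For context, a minimal sketch of how these options are typically wired into a flag set and parsed. It is illustrative only and not part of this commit; the flag-set name and the argument slice below are invented for the example, a real binary would pass os.Args[1:].

package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"k8s.io/kubernetes/pkg/controller/volume/persistentvolume/options"
)

func main() {
	// Start from the defaults defined in NewPersistentVolumeControllerOptions,
	// then let command-line flags override them.
	opts := options.NewPersistentVolumeControllerOptions()
	fs := pflag.NewFlagSet("controller-manager", pflag.ExitOnError)
	opts.AddFlags(fs)

	// Hypothetical arguments, just to show the flags registered above in use.
	_ = fs.Parse([]string{"--pvclaimbinder-sync-period=30s", "--enable-dynamic-provisioning=false"})
	fmt.Println(opts.PVClaimBinderSyncPeriod, opts.VolumeConfigFlags.EnableDynamicProvisioning)
}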
265
pkg/controller/volume/persistentvolume/provision_test.go
Normal file
@@ -0,0 +1,265 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"errors"
	"testing"

	"k8s.io/kubernetes/pkg/api"
)

// Test single call to syncClaim, expecting provisioning to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncClaim *once*.
// 3. Compare resulting volumes with expected volumes.
func TestProvisionSync(t *testing.T) {
	tests := []controllerTest{
		{
			// Provision a volume
			"11-1 - successful provision",
			novolumes,
			newVolumeArray("pvc-uid11-1", "1Gi", "uid11-1", "claim11-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
			newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
			// Binding will be completed in the next syncClaim
			newClaimArray("claim11-1", "uid11-1", "1Gi", "", api.ClaimPending, annClass),
			noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
		},
		{
			// Provision failure - plugin not found
			"11-2 - plugin not found",
			novolumes,
			novolumes,
			newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-2", "uid11-2", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed"}, noerrors,
			testSyncClaim,
		},
		{
			// Provision failure - newProvisioner returns error
			"11-3 - newProvisioner failure",
			novolumes,
			novolumes,
			newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-3", "uid11-3", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed"}, noerrors,
			wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim),
		},
		{
			// Provision failure - Provision returns error
			"11-4 - provision failure",
			novolumes,
			novolumes,
			newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-4", "uid11-4", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed"}, noerrors,
			wrapTestWithControllerConfig(operationProvision, []error{errors.New("Mock provisioner error")}, testSyncClaim),
		},
		{
			// Provision success - there is already a volume available, still
			// we provision a new one when requested.
			"11-6 - provisioning when there is a volume available",
			newVolumeArray("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
			[]*api.PersistentVolume{
				newVolume("volume11-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
				newVolume("pvc-uid11-6", "1Gi", "uid11-6", "claim11-6", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
			},
			newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
			// Binding will be completed in the next syncClaim
			newClaimArray("claim11-6", "uid11-6", "1Gi", "", api.ClaimPending, annClass),
			noevents, noerrors,
			// No provisioning plugin is configured - makes the test fail when
			// the controller erroneously tries to provision something
			wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
		},
		{
			// Provision success? - claim is bound before provisioner creates
			// a volume.
			"11-7 - claim is bound before provisioning",
			novolumes,
			newVolumeArray("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
			newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
			// The claim would be bound in the next syncClaim
			newClaimArray("claim11-7", "uid11-7", "1Gi", "", api.ClaimPending, annClass),
			noevents, noerrors,
			wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationProvision, []error{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
				// Create a volume before provisionClaimOperation starts.
				// This simulates a parallel controller provisioning the volume.
				reactor.lock.Lock()
				volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned)
				reactor.volumes[volume.Name] = volume
				reactor.lock.Unlock()
			}),
		},
		{
			// Provision success - cannot save provisioned PV once,
			// second retry succeeds
			"11-8 - cannot save provisioned volume",
			novolumes,
			newVolumeArray("pvc-uid11-8", "1Gi", "uid11-8", "claim11-8", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
			newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass),
			// Binding will be completed in the next syncClaim
			newClaimArray("claim11-8", "uid11-8", "1Gi", "", api.ClaimPending, annClass),
			noevents,
			[]reactorError{
				// Inject error to the first
				// kubeclient.PersistentVolumes.Create() call. All other calls
				// will succeed.
				{"create", "persistentvolumes", errors.New("Mock creation error")},
			},
			wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
		},
		{
			// Provision success? - cannot save provisioned PV five times,
			// volume is deleted and delete succeeds
			"11-9 - cannot save provisioned volume, delete succeeds",
			novolumes,
			novolumes,
			newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-9", "uid11-9", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed"},
			[]reactorError{
				// Inject error to five kubeclient.PersistentVolumes.Create()
				// calls
				{"create", "persistentvolumes", errors.New("Mock creation error1")},
				{"create", "persistentvolumes", errors.New("Mock creation error2")},
				{"create", "persistentvolumes", errors.New("Mock creation error3")},
				{"create", "persistentvolumes", errors.New("Mock creation error4")},
				{"create", "persistentvolumes", errors.New("Mock creation error5")},
			},
			wrapTestWithControllerConfig(operationDelete, []error{nil},
				wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim)),
		},
		{
			// Provision failure - cannot save provisioned PV five times,
			// volume delete failed - no plugin found
			"11-10 - cannot save provisioned volume, no delete plugin found",
			novolumes,
			novolumes,
			newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-10", "uid11-10", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
			[]reactorError{
				// Inject error to five kubeclient.PersistentVolumes.Create()
				// calls
				{"create", "persistentvolumes", errors.New("Mock creation error1")},
				{"create", "persistentvolumes", errors.New("Mock creation error2")},
				{"create", "persistentvolumes", errors.New("Mock creation error3")},
				{"create", "persistentvolumes", errors.New("Mock creation error4")},
				{"create", "persistentvolumes", errors.New("Mock creation error5")},
			},
			// No deleteCalls are configured, which results in no deleter plugin being available for the volume
			wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
		},
		{
			// Provision failure - cannot save provisioned PV five times,
			// volume delete failed - deleter returns error five times
			"11-11 - cannot save provisioned volume, deleter fails",
			novolumes,
			novolumes,
			newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-11", "uid11-11", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed", "Warning ProvisioningCleanupFailed"},
			[]reactorError{
				// Inject error to five kubeclient.PersistentVolumes.Create()
				// calls
				{"create", "persistentvolumes", errors.New("Mock creation error1")},
				{"create", "persistentvolumes", errors.New("Mock creation error2")},
				{"create", "persistentvolumes", errors.New("Mock creation error3")},
				{"create", "persistentvolumes", errors.New("Mock creation error4")},
				{"create", "persistentvolumes", errors.New("Mock creation error5")},
			},
			wrapTestWithControllerConfig(
				operationDelete, []error{
					errors.New("Mock deletion error1"),
					errors.New("Mock deletion error2"),
					errors.New("Mock deletion error3"),
					errors.New("Mock deletion error4"),
					errors.New("Mock deletion error5"),
				},
				wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
			),
		},
		{
			// Provision failure - cannot save provisioned PV five times,
			// volume delete succeeds 2nd time
			"11-12 - cannot save provisioned volume, delete succeeds 2nd time",
			novolumes,
			novolumes,
			newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass),
			newClaimArray("claim11-12", "uid11-12", "1Gi", "", api.ClaimPending, annClass),
			[]string{"Warning ProvisioningFailed"},
			[]reactorError{
				// Inject error to five kubeclient.PersistentVolumes.Create()
				// calls
				{"create", "persistentvolumes", errors.New("Mock creation error1")},
				{"create", "persistentvolumes", errors.New("Mock creation error2")},
				{"create", "persistentvolumes", errors.New("Mock creation error3")},
				{"create", "persistentvolumes", errors.New("Mock creation error4")},
				{"create", "persistentvolumes", errors.New("Mock creation error5")},
			},
			wrapTestWithControllerConfig(
				operationDelete, []error{
					errors.New("Mock deletion error1"),
					nil,
				},
				wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
			),
		},
	}
	runSyncTests(t, tests)
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
//    call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
//    events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
//    syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
//    "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
//    of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops.
func TestProvisionMultiSync(t *testing.T) {
	tests := []controllerTest{
		{
			// Provision a volume with binding
			"12-1 - successful provision",
			novolumes,
			newVolumeArray("pvc-uid12-1", "1Gi", "uid12-1", "claim12-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController, annDynamicallyProvisioned),
			newClaimArray("claim12-1", "uid12-1", "1Gi", "", api.ClaimPending, annClass),
			// Binding will be completed in the next syncClaim
			newClaimArray("claim12-1", "uid12-1", "1Gi", "pvc-uid12-1", api.ClaimBound, annClass, annBoundByController, annBindCompleted),
			noevents, noerrors, wrapTestWithControllerConfig(operationProvision, []error{nil}, testSyncClaim),
		},
	}

	runMultisyncTests(t, tests)
}

// When provisioning is disabled, provisioning a claim should instantly return nil
func TestDisablingDynamicProvisioner(t *testing.T) {
	ctrl := newTestController(nil, nil, nil, false)
	retVal := ctrl.provisionClaim(nil)
	if retVal != nil {
		t.Errorf("Expected nil return but got %v", retVal)
	}
}
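The multi-sync pattern described in the comment above TestProvisionMultiSync boils down to a small driver loop: drain "object changed" syncs, run a periodic sync over everything, and stop once neither makes any change, with a cap on the total number of calls. The following is an illustrative sketch only; the function and parameter names are invented for the example and are not the controller's or the test harness's real API.

package main

import "fmt"

// runUntilStable sketches the loop from the comment: step 2 reacts to changed
// volumes/claims, steps 3-4 run a periodic sync that may trigger more work,
// and step 5 stops once nothing changes. The call limit prevents endless loops.
func runUntilStable(syncChanged, periodicSync func() bool, maxCalls int) error {
	for calls := 0; calls < maxCalls; calls++ {
		if syncChanged() {
			continue // "volume/claim changed" events still pending
		}
		if periodicSync() {
			continue // periodic sync made changes, go around again
		}
		return nil // steady state reached; compare against expectations
	}
	return fmt.Errorf("no steady state after %d calls", maxCalls)
}

func main() {
	pending := 2 // pretend two "claim changed" events are queued
	changed := func() bool {
		if pending > 0 {
			pending--
			return true
		}
		return false
	}
	periodic := func() bool { return false }
	fmt.Println(runUntilStable(changed, periodic, 10))
}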
196
pkg/controller/volume/persistentvolume/recycle_test.go
Normal file
@@ -0,0 +1,196 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"errors"
	"testing"

	"k8s.io/kubernetes/pkg/api"
)

// Test single call to syncVolume, expecting recycling to happen.
// 1. Fill in the controller with initial data
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestRecycleSync(t *testing.T) {
	tests := []controllerTest{
		{
			// recycle volume bound by controller
			"6-1 - successful recycle",
			newVolumeArray("volume6-1", "1Gi", "uid6-1", "claim6-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController),
			newVolumeArray("volume6-1", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
			noclaims,
			noclaims,
			noevents, noerrors,
			// Inject recycler into the controller and call syncVolume. The
			// recycler simulates one recycle() call that succeeds.
			wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
		},
		{
			// recycle volume bound by user
			"6-2 - successful recycle with prebound volume",
			newVolumeArray("volume6-2", "1Gi", "uid6-2", "claim6-2", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			newVolumeArray("volume6-2", "1Gi", "", "claim6-2", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
			noclaims,
			noclaims,
			noevents, noerrors,
			// Inject recycler into the controller and call syncVolume. The
			// recycler simulates one recycle() call that succeeds.
			wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
		},
		{
			// recycle failure - plugin not found
			"6-3 - plugin not found",
			newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			withMessage("No recycler plugin found for the volume!", newVolumeArray("volume6-3", "1Gi", "uid6-3", "claim6-3", api.VolumeFailed, api.PersistentVolumeReclaimRecycle)),
			noclaims,
			noclaims,
			[]string{"Warning VolumeFailedRecycle"}, noerrors, testSyncVolume,
		},
		{
			// recycle failure - newRecycler returns error
			"6-4 - newRecycler returns error",
			newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			withMessage("Failed to create recycler: Mock plugin error: no recycleCalls configured", newVolumeArray("volume6-4", "1Gi", "uid6-4", "claim6-4", api.VolumeFailed, api.PersistentVolumeReclaimRecycle)),
			noclaims,
			noclaims,
			[]string{"Warning VolumeFailedRecycle"}, noerrors,
			wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume),
		},
		{
			// recycle failure - recycle returns error
			"6-5 - recycle returns error",
			newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			withMessage("Recycler failed: Mock recycle error", newVolumeArray("volume6-5", "1Gi", "uid6-5", "claim6-5", api.VolumeFailed, api.PersistentVolumeReclaimRecycle)),
			noclaims,
			noclaims,
			[]string{"Warning VolumeFailedRecycle"}, noerrors,
			wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error")}, testSyncVolume),
		},
		{
			// recycle success(?) - volume is deleted before doRecycle() starts
			"6-6 - volume is deleted before recycling",
			newVolumeArray("volume6-6", "1Gi", "uid6-6", "claim6-6", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			novolumes,
			noclaims,
			noclaims,
			noevents, noerrors,
			wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
				// Delete the volume before recycle operation starts
				reactor.lock.Lock()
				delete(reactor.volumes, "volume6-6")
				reactor.lock.Unlock()
			}),
		},
		{
			// recycle success(?) - volume is recycled by previous recycler just
			// at the time new doRecycle() starts. This simulates "volume no
			// longer needs recycling, skipping".
			"6-7 - volume is deleted before recycling",
			newVolumeArray("volume6-7", "1Gi", "uid6-7", "claim6-7", api.VolumeBound, api.PersistentVolumeReclaimRecycle, annBoundByController),
			newVolumeArray("volume6-7", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
			noclaims,
			noclaims,
			noevents, noerrors,
			wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
				// Mark the volume as Available before the recycler starts
				reactor.lock.Lock()
				volume := reactor.volumes["volume6-7"]
				volume.Spec.ClaimRef = nil
				volume.Status.Phase = api.VolumeAvailable
				volume.Annotations = nil
				reactor.lock.Unlock()
			}),
		},
		{
			// recycle success(?) - volume bound by user is recycled by previous
			// recycler just at the time new doRecycle() starts. This simulates
			// "volume no longer needs recycling, skipping" with volume bound by
			// user.
			"6-8 - prebound volume is deleted before recycling",
			newVolumeArray("volume6-8", "1Gi", "uid6-8", "claim6-8", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			newVolumeArray("volume6-8", "1Gi", "", "claim6-8", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
			noclaims,
			noclaims,
			noevents, noerrors,
			wrapTestWithInjectedOperation(wrapTestWithControllerConfig(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
				// Mark the volume as Available before the recycler starts
				reactor.lock.Lock()
				volume := reactor.volumes["volume6-8"]
				volume.Spec.ClaimRef.UID = ""
				volume.Status.Phase = api.VolumeAvailable
				reactor.lock.Unlock()
			}),
		},
		{
			// recycle success - volume bound by user is recycled, while a new
			// claim is created with another UID.
			"6-9 - prebound volume is recycled while the claim exists",
			newVolumeArray("volume6-9", "1Gi", "uid6-9", "claim6-9", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			newVolumeArray("volume6-9", "1Gi", "", "claim6-9", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
			newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending),
			newClaimArray("claim6-9", "uid6-9-x", "10Gi", "", api.ClaimPending),
			noevents, noerrors,
			// Inject recycler into the controller and call syncVolume. The
			// recycler simulates one recycle() call that succeeds.
			wrapTestWithControllerConfig(operationRecycle, []error{nil}, testSyncVolume),
		},
		{
			// volume has unknown reclaim policy - failure expected
			"6-10 - unknown reclaim policy",
			newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeBound, "Unknown"),
			withMessage("Volume has unrecognized PersistentVolumeReclaimPolicy", newVolumeArray("volume6-10", "1Gi", "uid6-10", "claim6-10", api.VolumeFailed, "Unknown")),
			noclaims,
			noclaims,
			[]string{"Warning VolumeUnknownReclaimPolicy"}, noerrors, testSyncVolume,
		},
	}
	runSyncTests(t, tests)
}

// Test multiple calls to syncClaim/syncVolume and periodic sync of all
// volume/claims. The test follows this pattern:
// 0. Load the controller with initial data.
// 1. Call controllerTest.testCall() once as in TestSync()
// 2. For all volumes/claims changed by previous syncVolume/syncClaim calls,
//    call appropriate syncVolume/syncClaim (simulating "volume/claim changed"
//    events). Go to 2. if these calls change anything.
// 3. When all changes are processed and no new changes were made, call
//    syncVolume/syncClaim on all volumes/claims (simulating "periodic sync").
// 4. If some changes were done by step 3., go to 2. (simulation of
//    "volume/claim updated" events, eventually performing step 3. again)
// 5. When 3. does not do any changes, finish the tests and compare final set
//    of volumes/claims with expected claims/volumes and report differences.
// A limit on the number of calls is enforced to prevent endless loops.
func TestRecycleMultiSync(t *testing.T) {
	tests := []controllerTest{
		{
			// recycle failure - recycle returns error. The controller should
			// try again.
			"7-1 - recycle returns error",
			newVolumeArray("volume7-1", "1Gi", "uid7-1", "claim7-1", api.VolumeBound, api.PersistentVolumeReclaimRecycle),
			newVolumeArray("volume7-1", "1Gi", "", "claim7-1", api.VolumeAvailable, api.PersistentVolumeReclaimRecycle),
			noclaims,
			noclaims,
			[]string{"Warning VolumeFailedRecycle"}, noerrors,
			wrapTestWithControllerConfig(operationRecycle, []error{errors.New("Mock recycle error"), nil}, testSyncVolume),
		},
	}

	runMultisyncTests(t, tests)
}
82
pkg/controller/volume/persistentvolume/volume_host.go
Normal file
@@ -0,0 +1,82 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package persistentvolume

import (
	"fmt"
	"net"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/types"
	"k8s.io/kubernetes/pkg/util/io"
	"k8s.io/kubernetes/pkg/util/mount"
	vol "k8s.io/kubernetes/pkg/volume"
)

// VolumeHost interface implementation for PersistentVolumeController.

var _ vol.VolumeHost = &PersistentVolumeController{}

func (ctrl *PersistentVolumeController) GetPluginDir(pluginName string) string {
	return ""
}

func (ctrl *PersistentVolumeController) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
	return ""
}

func (ctrl *PersistentVolumeController) GetPodPluginDir(podUID types.UID, pluginName string) string {
	return ""
}

func (ctrl *PersistentVolumeController) GetKubeClient() clientset.Interface {
	return ctrl.kubeClient
}

func (ctrl *PersistentVolumeController) NewWrapperMounter(volName string, spec vol.Spec, pod *api.Pod, opts vol.VolumeOptions) (vol.Mounter, error) {
	return nil, fmt.Errorf("PersistentVolumeController.NewWrapperMounter is not implemented")
}

func (ctrl *PersistentVolumeController) NewWrapperUnmounter(volName string, spec vol.Spec, podUID types.UID) (vol.Unmounter, error) {
	return nil, fmt.Errorf("PersistentVolumeController.NewWrapperUnmounter is not implemented")
}

func (ctrl *PersistentVolumeController) GetCloudProvider() cloudprovider.Interface {
	return ctrl.cloud
}

func (ctrl *PersistentVolumeController) GetMounter() mount.Interface {
	return nil
}

func (ctrl *PersistentVolumeController) GetWriter() io.Writer {
	return nil
}

func (ctrl *PersistentVolumeController) GetHostName() string {
	return ""
}

func (ctrl *PersistentVolumeController) GetHostIP() (net.IP, error) {
	return nil, fmt.Errorf("PersistentVolumeController.GetHostIP() is not implemented")
}

func (ctrl *PersistentVolumeController) GetRootContext() string {
	return ""
}