volume: use contextual logging

杨军10092085 2022-11-03 17:19:04 +08:00
parent b740a34302
commit 361e4ff0fa
61 changed files with 1326 additions and 1151 deletions
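
The pattern applied throughout this commit: each controller start function derives a named logger from the incoming context with klog.LoggerWithName, stores it back with klog.NewContext, and downstream code retrieves it with klog.FromContext and emits structured key/value pairs instead of klog.Infof/Errorf format strings. A minimal, self-contained sketch of that flow (startExampleController and the "example-controller" name are illustrative placeholders, not code from this commit):

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// startExampleController mirrors the pattern used by the start functions in
// this commit: derive a named logger from the incoming context, store it back
// into the context, and hand that context to the controller.
func startExampleController(ctx context.Context) {
	ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "example-controller"))
	run(ctx)
}

// run retrieves the logger from the context instead of calling the global
// klog.Infof/Errorf helpers, and logs structured key/value pairs.
func run(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting controller")
	logger.V(4).Info("Processing volume", "volumeName", "vol-1", "node", klog.KRef("", "node-1"))
}

func main() {
	startExampleController(context.Background())
}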

View File

@@ -271,7 +271,8 @@ func startPersistentVolumeBinderController(ctx context.Context, controllerContex
 EnableDynamicProvisioning: controllerContext.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration.EnableDynamicProvisioning,
 FilteredDialOptions: filteredDialOptions,
 }
-volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params)
+ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-binder-controller"))
+volumeController, volumeControllerErr := persistentvolumecontroller.NewController(ctx, params)
 if volumeControllerErr != nil {
 return nil, true, fmt.Errorf("failed to construct persistentvolume controller: %v", volumeControllerErr)
 }
@@ -295,8 +296,11 @@ func startAttachDetachController(ctx context.Context, controllerContext Controll
 return nil, true, err
 }
+logger := klog.LoggerWithName(klog.FromContext(ctx), "attachdetach-controller")
+ctx = klog.NewContext(ctx, logger)
 attachDetachController, attachDetachControllerErr :=
 attachdetach.NewAttachDetachController(
+logger,
 controllerContext.ClientBuilder.ClientOrDie("attachdetach-controller"),
 controllerContext.InformerFactory.Core().V1().Pods(),
 controllerContext.InformerFactory.Core().V1().Nodes(),
@@ -316,7 +320,7 @@ func startAttachDetachController(ctx context.Context, controllerContext Controll
 if attachDetachControllerErr != nil {
 return nil, true, fmt.Errorf("failed to start attach/detach controller: %v", attachDetachControllerErr)
 }
-go attachDetachController.Run(ctx.Done())
+go attachDetachController.Run(ctx)
 return nil, true, nil
 }
@@ -346,12 +350,14 @@ func startVolumeExpandController(ctx context.Context, controllerContext Controll
 if expandControllerErr != nil {
 return nil, true, fmt.Errorf("failed to start volume expand controller: %v", expandControllerErr)
 }
+ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-expander-controller"))
 go expandController.Run(ctx)
 return nil, true, nil
 }
 func startEphemeralVolumeController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
+ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "ephemeral-volume-controller"))
 ephemeralController, err := ephemeral.NewController(
 controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
 controllerContext.InformerFactory.Core().V1().Pods(),
@@ -548,7 +554,9 @@ func startGarbageCollectorController(ctx context.Context, controllerContext Cont
 }
 func startPVCProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
+ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolumeclaim-protection-controller"))
 pvcProtectionController, err := pvcprotection.NewPVCProtectionController(
+klog.FromContext(ctx),
 controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
 controllerContext.InformerFactory.Core().V1().Pods(),
 controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
@@ -561,7 +569,9 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control
 }
 func startPVProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
+ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-protection-controller"))
 go pvprotection.NewPVProtectionController(
+klog.FromContext(ctx),
 controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
 controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
 ).Run(ctx, 1)

View File

@@ -384,7 +384,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
 }
 func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) error {
-logger := klog.LoggerWithValues(klog.FromContext(ctx), "claim", klog.KRef(namespace, name))
+logger := klog.LoggerWithValues(klog.FromContext(ctx), "PVC", klog.KRef(namespace, name))
 ctx = klog.NewContext(ctx, logger)
 claim, err := ec.claimLister.ResourceClaims(namespace).Get(name)
 if err != nil {

View File

@@ -19,6 +19,7 @@ limitations under the License.
 package attachdetach
 import (
+"context"
 "fmt"
 "net"
 "time"
@@ -99,12 +100,13 @@ var DefaultTimerConfig = TimerConfig{
 // AttachDetachController defines the operations supported by this controller.
 type AttachDetachController interface {
-Run(stopCh <-chan struct{})
+Run(ctx context.Context)
 GetDesiredStateOfWorld() cache.DesiredStateOfWorld
 }
 // NewAttachDetachController returns a new instance of AttachDetachController.
 func NewAttachDetachController(
+logger klog.Logger,
 kubeClient clientset.Interface,
 podInformer coreinformers.PodInformer,
 nodeInformer coreinformers.NodeInformer,
@@ -194,9 +196,15 @@ func NewAttachDetachController(
 adc.intreeToCSITranslator)
 podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
-AddFunc: adc.podAdd,
-UpdateFunc: adc.podUpdate,
-DeleteFunc: adc.podDelete,
+AddFunc: func(obj interface{}) {
+adc.podAdd(logger, obj)
+},
+UpdateFunc: func(oldObj, newObj interface{}) {
+adc.podUpdate(logger, oldObj, newObj)
+},
+DeleteFunc: func(obj interface{}) {
+adc.podDelete(logger, obj)
+},
 })
 // This custom indexer will index pods by its PVC keys. Then we don't need
@@ -206,9 +214,15 @@ func NewAttachDetachController(
 }
 nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
-AddFunc: adc.nodeAdd,
-UpdateFunc: adc.nodeUpdate,
-DeleteFunc: adc.nodeDelete,
+AddFunc: func(obj interface{}) {
+adc.nodeAdd(logger, obj)
+},
+UpdateFunc: func(oldObj, newObj interface{}) {
+adc.nodeUpdate(logger, oldObj, newObj)
+},
+DeleteFunc: func(obj interface{}) {
+adc.nodeDelete(logger, obj)
+},
 })
 pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
@@ -316,7 +330,7 @@ type attachDetachController struct {
 filteredDialOptions *proxyutil.FilteredDialOptions
 }
-func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
+func (adc *attachDetachController) Run(ctx context.Context) {
 defer runtime.HandleCrash()
 defer adc.pvcQueue.ShutDown()
@@ -325,8 +339,9 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
 adc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: adc.kubeClient.CoreV1().Events("")})
 defer adc.broadcaster.Shutdown()
-klog.Infof("Starting attach detach controller")
-defer klog.Infof("Shutting down attach detach controller")
+logger := klog.FromContext(ctx)
+logger.Info("Starting attach detach controller")
+defer logger.Info("Shutting down attach detach controller")
 synced := []kcache.InformerSynced{adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced}
 if adc.csiNodeSynced != nil {
@@ -339,21 +354,21 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
 synced = append(synced, adc.volumeAttachmentSynced)
 }
-if !kcache.WaitForNamedCacheSync("attach detach", stopCh, synced...) {
+if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), synced...) {
 return
 }
-err := adc.populateActualStateOfWorld()
+err := adc.populateActualStateOfWorld(logger)
 if err != nil {
-klog.Errorf("Error populating the actual state of world: %v", err)
+logger.Error(err, "Error populating the actual state of world")
 }
-err = adc.populateDesiredStateOfWorld()
+err = adc.populateDesiredStateOfWorld(logger)
 if err != nil {
-klog.Errorf("Error populating the desired state of world: %v", err)
+logger.Error(err, "Error populating the desired state of world")
 }
-go adc.reconciler.Run(stopCh)
-go adc.desiredStateOfWorldPopulator.Run(stopCh)
-go wait.Until(adc.pvcWorker, time.Second, stopCh)
+go adc.reconciler.Run(ctx)
+go adc.desiredStateOfWorldPopulator.Run(ctx)
+go wait.UntilWithContext(ctx, adc.pvcWorker, time.Second)
 metrics.Register(adc.pvcLister,
 adc.pvLister,
 adc.podLister,
@@ -363,11 +378,11 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
 adc.csiMigratedPluginManager,
 adc.intreeToCSITranslator)
-<-stopCh
+<-ctx.Done()
 }
-func (adc *attachDetachController) populateActualStateOfWorld() error {
-klog.V(5).Infof("Populating ActualStateOfworld")
+func (adc *attachDetachController) populateActualStateOfWorld(logger klog.Logger) error {
+logger.V(5).Info("Populating ActualStateOfworld")
 nodes, err := adc.nodeLister.List(labels.Everything())
 if err != nil {
 return err
@@ -382,18 +397,18 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
 // volume spec is not needed to detach a volume. If the volume is used by a pod, it
 // its spec can be: this would happen during in the populateDesiredStateOfWorld which
 // scans the pods and updates their volumes in the ActualStateOfWorld too.
-err = adc.actualStateOfWorld.MarkVolumeAsAttached(uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath)
+err = adc.actualStateOfWorld.MarkVolumeAsAttached(logger, uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath)
 if err != nil {
-klog.Errorf("Failed to mark the volume as attached: %v", err)
+logger.Error(err, "Failed to mark the volume as attached")
 continue
 }
-adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
+adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse)
 adc.addNodeToDswp(node, types.NodeName(node.Name))
 }
 }
-err = adc.processVolumeAttachments()
+err = adc.processVolumeAttachments(logger)
 if err != nil {
-klog.Errorf("Failed to process volume attachments: %v", err)
+logger.Error(err, "Failed to process volume attachments")
 }
 return err
 }
@@ -420,8 +435,8 @@ func (adc *attachDetachController) getNodeVolumeDevicePath(
 return devicePath, err
 }
-func (adc *attachDetachController) populateDesiredStateOfWorld() error {
-klog.V(5).Infof("Populating DesiredStateOfworld")
+func (adc *attachDetachController) populateDesiredStateOfWorld(logger klog.Logger) error {
+logger.V(5).Info("Populating DesiredStateOfworld")
 pods, err := adc.podLister.List(labels.Everything())
 if err != nil {
@@ -429,56 +444,52 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 }
 for _, pod := range pods {
 podToAdd := pod
-adc.podAdd(podToAdd)
+adc.podAdd(logger, podToAdd)
 for _, podVolume := range podToAdd.Spec.Volumes {
 nodeName := types.NodeName(podToAdd.Spec.NodeName)
 // The volume specs present in the ActualStateOfWorld are nil, let's replace those
 // with the correct ones found on pods. The present in the ASW with no corresponding
 // pod will be detached and the spec is irrelevant.
-volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
+volumeSpec, err := util.CreateVolumeSpec(logger, podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
 if err != nil {
-klog.Errorf(
-"Error creating spec for volume %q, pod %q/%q: %v",
-podVolume.Name,
-podToAdd.Namespace,
-podToAdd.Name,
-err)
+logger.Error(
+err,
+"Error creating spec for volume of pod",
+"pod", klog.KObj(podToAdd),
+"volumeName", podVolume.Name)
 continue
 }
 plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
 if err != nil || plugin == nil {
-klog.V(10).Infof(
-"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
-podVolume.Name,
-podToAdd.Namespace,
-podToAdd.Name,
-err)
+logger.V(10).Info(
+"Skipping volume for pod: it does not implement attacher interface",
+"pod", klog.KObj(podToAdd),
+"volumeName", podVolume.Name,
+"err", err)
 continue
 }
 volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
 if err != nil {
-klog.Errorf(
-"Failed to find unique name for volume %q, pod %q/%q: %v",
-podVolume.Name,
-podToAdd.Namespace,
-podToAdd.Name,
-err)
+logger.Error(
+err,
+"Failed to find unique name for volume of pod",
+"pod", klog.KObj(podToAdd),
+"volumeName", podVolume.Name)
 continue
 }
 attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
 if attachState == cache.AttachStateAttached {
-klog.V(10).Infof("Volume %q is attached to node %q. Marking as attached in ActualStateOfWorld",
-volumeName,
-nodeName,
-)
+logger.V(10).Info("Volume is attached to node. Marking as attached in ActualStateOfWorld",
+"node", klog.KRef("", string(nodeName)),
+"volumeName", volumeName)
 devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
 if err != nil {
-klog.Errorf("Failed to find device path: %v", err)
+logger.Error(err, "Failed to find device path")
 continue
 }
-err = adc.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath)
+err = adc.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, devicePath)
 if err != nil {
-klog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err)
+logger.Error(err, "Failed to update volume spec for node", "node", klog.KRef("", string(nodeName)))
 }
 }
 }
@@ -487,7 +498,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
 return nil
 }
-func (adc *attachDetachController) podAdd(obj interface{}) {
+func (adc *attachDetachController) podAdd(logger klog.Logger, obj interface{}) {
 pod, ok := obj.(*v1.Pod)
 if pod == nil || !ok {
 return
@@ -502,7 +513,7 @@ func (adc *attachDetachController) podAdd(obj interface{}) {
 adc.desiredStateOfWorld,
 true /* default volume action */)
-util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
+util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */
 adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
 }
@@ -511,7 +522,7 @@ func (adc *attachDetachController) GetDesiredStateOfWorld() cache.DesiredStateOf
 return adc.desiredStateOfWorld
 }
-func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
+func (adc *attachDetachController) podUpdate(logger klog.Logger, oldObj, newObj interface{}) {
 pod, ok := newObj.(*v1.Pod)
 if pod == nil || !ok {
 return
@@ -526,21 +537,21 @@ func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
 adc.desiredStateOfWorld,
 true /* default volume action */)
-util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
+util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */
 adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
 }
-func (adc *attachDetachController) podDelete(obj interface{}) {
+func (adc *attachDetachController) podDelete(logger klog.Logger, obj interface{}) {
 pod, ok := obj.(*v1.Pod)
 if pod == nil || !ok {
 return
 }
-util.ProcessPodVolumes(pod, false, /* addVolumes */
+util.ProcessPodVolumes(logger, pod, false, /* addVolumes */
 adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
 }
-func (adc *attachDetachController) nodeAdd(obj interface{}) {
+func (adc *attachDetachController) nodeAdd(logger klog.Logger, obj interface{}) {
 node, ok := obj.(*v1.Node)
 // TODO: investigate if nodeName is empty then if we can return
 // kubernetes/kubernetes/issues/37777
@@ -548,15 +559,15 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) {
 return
 }
 nodeName := types.NodeName(node.Name)
-adc.nodeUpdate(nil, obj)
+adc.nodeUpdate(logger, nil, obj)
 // kubernetes/kubernetes/issues/37586
 // This is to workaround the case when a node add causes to wipe out
 // the attached volumes field. This function ensures that we sync with
 // the actual status.
-adc.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
+adc.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName)
 }
-func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
+func (adc *attachDetachController) nodeUpdate(logger klog.Logger, oldObj, newObj interface{}) {
 node, ok := newObj.(*v1.Node)
 // TODO: investigate if nodeName is empty then if we can return
 if node == nil || !ok {
@@ -565,10 +576,10 @@ func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
 nodeName := types.NodeName(node.Name)
 adc.addNodeToDswp(node, nodeName)
-adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
+adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse)
 }
-func (adc *attachDetachController) nodeDelete(obj interface{}) {
+func (adc *attachDetachController) nodeDelete(logger klog.Logger, obj interface{}) {
 node, ok := obj.(*v1.Node)
 if node == nil || !ok {
 return
@@ -577,10 +588,10 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
 nodeName := types.NodeName(node.Name)
 if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
 // This might happen during drain, but we still want it to appear in our logs
-klog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err)
+logger.Info("Error removing node from desired-state-of-world", "node", klog.KObj(node), "err", err)
 }
-adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
+adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse)
 }
 func (adc *attachDetachController) enqueuePVC(obj interface{}) {
@@ -593,19 +604,19 @@ func (adc *attachDetachController) enqueuePVC(obj interface{}) {
 }
 // pvcWorker processes items from pvcQueue
-func (adc *attachDetachController) pvcWorker() {
-for adc.processNextItem() {
+func (adc *attachDetachController) pvcWorker(ctx context.Context) {
+for adc.processNextItem(klog.FromContext(ctx)) {
 }
 }
-func (adc *attachDetachController) processNextItem() bool {
+func (adc *attachDetachController) processNextItem(logger klog.Logger) bool {
 keyObj, shutdown := adc.pvcQueue.Get()
 if shutdown {
 return false
 }
 defer adc.pvcQueue.Done(keyObj)
-if err := adc.syncPVCByKey(keyObj.(string)); err != nil {
+if err := adc.syncPVCByKey(logger, keyObj.(string)); err != nil {
 // Rather than wait for a full resync, re-add the key to the
 // queue to be processed.
 adc.pvcQueue.AddRateLimited(keyObj)
@@ -619,16 +630,16 @@ func (adc *attachDetachController) processNextItem() bool {
 return true
 }
-func (adc *attachDetachController) syncPVCByKey(key string) error {
-klog.V(5).Infof("syncPVCByKey[%s]", key)
+func (adc *attachDetachController) syncPVCByKey(logger klog.Logger, key string) error {
+logger.V(5).Info("syncPVCByKey", "pvcKey", key)
 namespace, name, err := kcache.SplitMetaNamespaceKey(key)
 if err != nil {
-klog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err)
+logger.V(4).Info("Error getting namespace & name of pvc to get pvc from informer", "pvcKey", key, "err", err)
 return nil
 }
 pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
 if apierrors.IsNotFound(err) {
-klog.V(4).Infof("error getting pvc %q from informer: %v", key, err)
+logger.V(4).Info("Error getting pvc from informer", "pvcKey", key, "err", err)
 return nil
 }
 if err != nil {
@@ -658,7 +669,7 @@ func (adc *attachDetachController) syncPVCByKey(key string) error {
 adc.desiredStateOfWorld,
 true /* default volume action */)
-util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
+util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */
 adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
 }
 return nil
@@ -669,8 +680,8 @@ func (adc *attachDetachController) syncPVCByKey(key string) error {
 // corresponding volume in the actual state of the world to indicate that it is
 // mounted.
 func (adc *attachDetachController) processVolumesInUse(
-nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
-klog.V(4).Infof("processVolumesInUse for node %q", nodeName)
+logger klog.Logger, nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
+logger.V(4).Info("processVolumesInUse for node", "node", klog.KRef("", string(nodeName)))
 for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
 mounted := false
 for _, volumeInUse := range volumesInUse {
@@ -679,11 +690,14 @@ func (adc *attachDetachController) processVolumesInUse(
 break
 }
 }
-err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
+err := adc.actualStateOfWorld.SetVolumeMountedByNode(logger, attachedVolume.VolumeName, nodeName, mounted)
 if err != nil {
-klog.Warningf(
-"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
-attachedVolume.VolumeName, nodeName, mounted, err)
+logger.Info(
+"SetVolumeMountedByNode returned an error",
+"node", klog.KRef("", string(nodeName)),
+"volumeName", attachedVolume.VolumeName,
+"mounted", mounted,
+"err", err)
 }
 }
 }
@@ -696,10 +710,10 @@ func (adc *attachDetachController) processVolumesInUse(
 //
 // if yes, the reconciler will attempt attach on the volume;
 // if not (could be a dangling attachment), the reconciler will detach this volume.
-func (adc *attachDetachController) processVolumeAttachments() error {
+func (adc *attachDetachController) processVolumeAttachments(logger klog.Logger) error {
 vas, err := adc.volumeAttachmentLister.List(labels.Everything())
 if err != nil {
-klog.Errorf("failed to list VolumeAttachment objects: %v", err)
+logger.Error(err, "Failed to list VolumeAttachment objects")
 return err
 }
 for _, va := range vas {
@@ -707,13 +721,12 @@ func (adc *attachDetachController) processVolumeAttachments() error {
 pvName := va.Spec.Source.PersistentVolumeName
 if pvName == nil {
 // Currently VA objects are created for CSI volumes only. nil pvName is unexpected, generate a warning
-klog.Warningf("Skipping the va as its pvName is nil, va.Name: %q, nodeName: %q",
-va.Name, nodeName)
+logger.Info("Skipping the va as its pvName is nil", "node", klog.KRef("", string(nodeName)), "vaName", va.Name)
 continue
 }
 pv, err := adc.pvLister.Get(*pvName)
 if err != nil {
-klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err)
+logger.Error(err, "Unable to lookup pv object", "PV", klog.KRef("", *pvName))
 continue
 }
@@ -730,13 +743,7 @@ func (adc *attachDetachController) processVolumeAttachments() error {
 // podNamespace is not needed here for Azurefile as the volumeName generated will be the same with or without podNamespace
 volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, "" /* podNamespace */, adc.intreeToCSITranslator)
 if err != nil {
-klog.Errorf(
-"Failed to translate intree volumeSpec to CSI volumeSpec for volume:%q, va.Name:%q, nodeName:%q: %s. Error: %v",
-*pvName,
-va.Name,
-nodeName,
-inTreePluginName,
-err)
+logger.Error(err, "Failed to translate intree volumeSpec to CSI volumeSpec for volume", "node", klog.KRef("", string(nodeName)), "inTreePluginName", inTreePluginName, "vaName", va.Name, "PV", klog.KRef("", *pvName))
 continue
 }
 }
@@ -746,32 +753,22 @@ func (adc *attachDetachController) processVolumeAttachments() error {
 plugin, err = adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
 if err != nil || plugin == nil {
 // Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
-klog.Warningf(
-"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
-*pvName,
-nodeName,
-err)
+logger.Info("Skipping processing the volume on node, no attacher interface found", "node", klog.KRef("", string(nodeName)), "PV", klog.KRef("", *pvName), "err", err)
 continue
 }
 }
 volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
 if err != nil {
-klog.Errorf(
-"Failed to find unique name for volume:%q, va.Name:%q, nodeName:%q: %v",
-*pvName,
-va.Name,
-nodeName,
-err)
+logger.Error(err, "Failed to find unique name for volume", "node", klog.KRef("", string(nodeName)), "vaName", va.Name, "PV", klog.KRef("", *pvName))
 continue
 }
 attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
 if attachState == cache.AttachStateDetached {
-klog.V(1).Infof("Marking volume attachment as uncertain as volume:%q (%q) is not attached (%v)",
-volumeName, nodeName, attachState)
-err = adc.actualStateOfWorld.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName)
+logger.V(1).Info("Marking volume attachment as uncertain as volume is not attached", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName, "attachState", attachState)
+err = adc.actualStateOfWorld.MarkVolumeAsUncertain(logger, volumeName, volumeSpec, nodeName)
 if err != nil {
-klog.Errorf("MarkVolumeAsUncertain fail to add the volume %q (%q) to ASW. err: %s", volumeName, nodeName, err)
+logger.Error(err, "MarkVolumeAsUncertain fail to add the volume to ASW", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName)
 }
 }
 }
@@ -887,7 +884,7 @@ func (adc *attachDetachController) GetServiceAccountTokenFunc() func(_, _ string
 func (adc *attachDetachController) DeleteServiceAccountTokenFunc() func(types.UID) {
 return func(types.UID) {
-klog.Errorf("DeleteServiceAccountToken unsupported in attachDetachController")
+klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in attachDetachController")
 }
 }
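
The Run hunks above also replace the controller's stop channel with the context: ctx.Done() stands in for stopCh when waiting for informer caches, and wait.UntilWithContext passes the context (and with it the logger) into each worker invocation. A small runnable sketch of that conversion, with a hypothetical worker standing in for adc.pvcWorker:

package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// worker is a stand-in for adc.pvcWorker: it now receives a context so it can
// pull the contextual logger out of it instead of using global klog calls.
func worker(ctx context.Context) {
	klog.FromContext(ctx).V(5).Info("worker tick")
}

func main() {
	// The context replaces the old stop channel; cancelling it stops the loop.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// Before: go wait.Until(worker, time.Second, stopCh)
	// After: the same periodic loop, driven and stopped by the context.
	wait.UntilWithContext(ctx, worker, time.Second)
}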

View File

@@ -28,6 +28,7 @@ import (
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/client-go/informers"
 kcache "k8s.io/client-go/tools/cache"
+"k8s.io/klog/v2/ktesting"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
 controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@@ -47,7 +48,9 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
 informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
 // Act
+logger, _ := ktesting.NewTestContext(t)
 _, err := NewAttachDetachController(
+logger,
 fakeKubeClient,
 informerFactory.Core().V1().Pods(),
 informerFactory.Core().V1().Nodes(),
@@ -107,12 +110,13 @@ func Test_AttachDetachControllerStateOfWolrdPopulators_Positive(t *testing.T) {
 adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
 adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
-err := adc.populateActualStateOfWorld()
+logger, _ := ktesting.NewTestContext(t)
+err := adc.populateActualStateOfWorld(logger)
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
 }
-err = adc.populateDesiredStateOfWorld()
+err = adc.populateDesiredStateOfWorld(logger)
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 }
@@ -172,7 +176,11 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 var podsNum, extraPodsNum, nodesNum, i int
 // Create the controller
+logger, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
+defer cancel()
 adcObj, err := NewAttachDetachController(
+logger,
 fakeKubeClient,
 informerFactory.Core().V1().Pods(),
 informerFactory.Core().V1().Nodes(),
@@ -196,8 +204,6 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 adc := adcObj.(*attachDetachController)
-stopCh := make(chan struct{})
 pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
@@ -227,9 +233,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 csiNodeInformer.GetIndexer().Add(&csiNodeToAdd)
 }
-informerFactory.Start(stopCh)
-if !kcache.WaitForNamedCacheSync("attach detach", stopCh,
+informerFactory.Start(ctx.Done())
+if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(),
 informerFactory.Core().V1().Pods().Informer().HasSynced,
 informerFactory.Core().V1().Nodes().Informer().HasSynced,
 informerFactory.Storage().V1().CSINodes().Informer().HasSynced) {
@@ -278,7 +284,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 }
 // Populate ASW
-err = adc.populateActualStateOfWorld()
+err = adc.populateActualStateOfWorld(logger)
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
 }
@@ -295,7 +301,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 }
 // Populate DSW
-err = adc.populateDesiredStateOfWorld()
+err = adc.populateDesiredStateOfWorld(logger)
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 }
@@ -310,9 +316,8 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
 podInformer.GetIndexer().Add(newPod)
 }
-go adc.reconciler.Run(stopCh)
-go adc.desiredStateOfWorldPopulator.Run(stopCh)
-defer close(stopCh)
+go adc.reconciler.Run(ctx)
+go adc.desiredStateOfWorldPopulator.Run(ctx)
 time.Sleep(time.Second * 1) // Wait so the reconciler calls sync at least once
@@ -437,7 +442,11 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 vaInformer := informerFactory.Storage().V1().VolumeAttachments().Informer()
 // Create the controller
+logger, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
+defer cancel()
 adcObj, err := NewAttachDetachController(
+logger,
 fakeKubeClient,
 informerFactory.Core().V1().Pods(),
 informerFactory.Core().V1().Nodes(),
@@ -537,10 +546,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 }
 // Makesure the informer cache is synced
-stopCh := make(chan struct{})
-informerFactory.Start(stopCh)
-if !kcache.WaitForNamedCacheSync("attach detach", stopCh,
+informerFactory.Start(ctx.Done())
+if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(),
 informerFactory.Core().V1().Pods().Informer().HasSynced,
 informerFactory.Core().V1().Nodes().Informer().HasSynced,
 informerFactory.Core().V1().PersistentVolumes().Informer().HasSynced,
@@ -549,21 +557,19 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
 }
 // Populate ASW
-err = adc.populateActualStateOfWorld()
+err = adc.populateActualStateOfWorld(logger)
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
 }
 // Populate DSW
-err = adc.populateDesiredStateOfWorld()
+err = adc.populateDesiredStateOfWorld(logger)
 if err != nil {
 t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
 }
 // Run reconciler and DSW populator loops
-go adc.reconciler.Run(stopCh)
-go adc.desiredStateOfWorldPopulator.Run(stopCh)
-defer close(stopCh)
+go adc.reconciler.Run(ctx)
+go adc.desiredStateOfWorldPopulator.Run(ctx)
 if tc.csiMigration {
 verifyExpectedVolumeState(t, adc, tc)
 } else {
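
In the tests, k8s.io/klog/v2/ktesting supplies both the logger and the context, so the stopCh bookkeeping disappears. A minimal sketch of that setup (TestContextualLoggingSetup is a made-up test name, not one from this commit):

package attachdetach

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
)

// TestContextualLoggingSetup shows the per-test wiring used above: ktesting
// routes log output through t.Log and returns a context carrying that logger.
func TestContextualLoggingSetup(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	logger.Info("test started")
	_ = ctx // pass ctx (or logger) into the code under test instead of a stop channel
}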

View File

@ -23,13 +23,12 @@ package cache
import ( import (
"fmt" "fmt"
"k8s.io/klog/v2"
"sync" "sync"
"time" "time"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
@ -60,7 +59,7 @@ type ActualStateOfWorld interface {
// added. // added.
// If no node with the name nodeName exists in list of attached nodes for // If no node with the name nodeName exists in list of attached nodes for
// the specified volume, the node is added. // the specified volume, the node is added.
AddVolumeNode(uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, attached bool) (v1.UniqueVolumeName, error) AddVolumeNode(logger klog.Logger, uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, attached bool) (v1.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume // SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true the mounted parameter indicates the volume // and node. When set to true the mounted parameter indicates the volume
@ -72,23 +71,23 @@ type ActualStateOfWorld interface {
// returned. // returned.
// If no node with the name nodeName exists in list of attached nodes for // If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned. // the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error SetVolumeMountedByNode(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified // SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status // node to true indicating the AttachedVolume field in the Node's Status
// object needs to be updated by the node updater again. // object needs to be updated by the node updater again.
// If the specified node does not exist in the nodesToUpdateStatusFor list, // If the specified node does not exist in the nodesToUpdateStatusFor list,
// log the error and return // log the error and return
SetNodeStatusUpdateNeeded(nodeName types.NodeName) SetNodeStatusUpdateNeeded(logger klog.Logger, nodeName types.NodeName)
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach // ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
// request any more for the volume // request any more for the volume
ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) ResetDetachRequestTime(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// SetDetachRequestTime sets the detachRequestedTime to current time if this is no // SetDetachRequestTime sets the detachRequestedTime to current time if this is no
// previous request (the previous detachRequestedTime is zero) and return the time elapsed // previous request (the previous detachRequestedTime is zero) and return the time elapsed
// since last request // since last request
SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) SetDetachRequestTime(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
// DeleteVolumeNode removes the given volume and node from the underlying // DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the // store indicating the specified volume is no longer attached to the
@ -135,12 +134,12 @@ type ActualStateOfWorld interface {
// this may differ from the actual list of attached volumes for the node // this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation // since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered). // is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume
// GetVolumesToReportAttachedForNode returns the list of volumes that should be reported as // GetVolumesToReportAttachedForNode returns the list of volumes that should be reported as
// attached for the given node. It reports a boolean indicating if there is an update for that // attached for the given node. It reports a boolean indicating if there is an update for that
// node and the corresponding attachedVolumes list. // node and the corresponding attachedVolumes list.
GetVolumesToReportAttachedForNode(name types.NodeName) (bool, []v1.AttachedVolume) GetVolumesToReportAttachedForNode(logger klog.Logger, name types.NodeName) (bool, []v1.AttachedVolume)
// GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor // GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor
GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor
@ -279,15 +278,17 @@ type nodeToUpdateStatusFor struct {
} }
func (asw *actualStateOfWorld) MarkVolumeAsUncertain( func (asw *actualStateOfWorld) MarkVolumeAsUncertain(
logger klog.Logger,
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error { uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error {
_, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, "", false /* isAttached */) _, err := asw.AddVolumeNode(logger, uniqueName, volumeSpec, nodeName, "", false /* isAttached */)
return err return err
} }
func (asw *actualStateOfWorld) MarkVolumeAsAttached( func (asw *actualStateOfWorld) MarkVolumeAsAttached(
logger klog.Logger,
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error { uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, devicePath, true) _, err := asw.AddVolumeNode(logger, uniqueName, volumeSpec, nodeName, devicePath, true)
return err return err
} }
@ -304,13 +305,15 @@ func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
} }
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached( func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName) asw.addVolumeToReportAsAttached(logger, volumeName, nodeName)
} }
func (asw *actualStateOfWorld) AddVolumeNode( func (asw *actualStateOfWorld) AddVolumeNode(
logger klog.Logger,
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, isAttached bool) (v1.UniqueVolumeName, error) { uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, isAttached bool) (v1.UniqueVolumeName, error) {
volumeName := uniqueName volumeName := uniqueName
if volumeName == "" { if volumeName == "" {
@ -354,10 +357,10 @@ func (asw *actualStateOfWorld) AddVolumeNode(
// Update the fields for volume object except the nodes attached to the volumes. // Update the fields for volume object except the nodes attached to the volumes.
volumeObj.devicePath = devicePath volumeObj.devicePath = devicePath
volumeObj.spec = volumeSpec volumeObj.spec = volumeSpec
klog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q", logger.V(2).Info("Volume is already added to attachedVolume list to node, update device path",
volumeName, "volumeName", volumeName,
nodeName, "node", klog.KRef("", string(nodeName)),
devicePath) "devicePath", devicePath)
} }
node, nodeExists := volumeObj.nodesAttachedTo[nodeName] node, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if !nodeExists { if !nodeExists {
@ -370,22 +373,23 @@ func (asw *actualStateOfWorld) AddVolumeNode(
} }
} else { } else {
node.attachedConfirmed = isAttached node.attachedConfirmed = isAttached
klog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q, the current attach state is %t", logger.V(5).Info("Volume is already added to attachedVolume list to the node",
volumeName, "volumeName", volumeName,
nodeName, "node", klog.KRef("", string(nodeName)),
isAttached) "currentAttachState", isAttached)
} }
volumeObj.nodesAttachedTo[nodeName] = node volumeObj.nodesAttachedTo[nodeName] = node
asw.attachedVolumes[volumeName] = volumeObj asw.attachedVolumes[volumeName] = volumeObj
if isAttached { if isAttached {
asw.addVolumeToReportAsAttached(volumeName, nodeName) asw.addVolumeToReportAsAttached(logger, volumeName, nodeName)
} }
return volumeName, nil return volumeName, nil
} }
func (asw *actualStateOfWorld) SetVolumeMountedByNode( func (asw *actualStateOfWorld) SetVolumeMountedByNode(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error { volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -397,21 +401,22 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
nodeObj.mountedByNode = mounted nodeObj.mountedByNode = mounted
volumeObj.nodesAttachedTo[nodeName] = nodeObj volumeObj.nodesAttachedTo[nodeName] = nodeObj
klog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t", logger.V(4).Info("SetVolumeMountedByNode volume to the node",
volumeName, "node", klog.KRef("", string(nodeName)),
nodeName, "volumeName", volumeName,
mounted) "mounted", mounted)
return nil return nil
} }
func (asw *actualStateOfWorld) ResetDetachRequestTime( func (asw *actualStateOfWorld) ResetDetachRequestTime(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName) volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil { if err != nil {
klog.Errorf("Failed to ResetDetachRequestTime with error: %v", err) logger.Error(err, "Failed to ResetDetachRequestTime with error")
return return
} }
nodeObj.detachRequestedTime = time.Time{} nodeObj.detachRequestedTime = time.Time{}
@ -419,6 +424,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
} }
func (asw *actualStateOfWorld) SetDetachRequestTime( func (asw *actualStateOfWorld) SetDetachRequestTime(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -431,9 +437,9 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
if nodeObj.detachRequestedTime.IsZero() { if nodeObj.detachRequestedTime.IsZero() {
nodeObj.detachRequestedTime = time.Now() nodeObj.detachRequestedTime = time.Now()
volumeObj.nodesAttachedTo[nodeName] = nodeObj volumeObj.nodesAttachedTo[nodeName] = nodeObj
klog.V(4).Infof("Set detach request time to current time for volume %v on node %q", logger.V(4).Info("Set detach request time to current time for volume on node",
volumeName, "node", klog.KRef("", string(nodeName)),
nodeName) "volumeName", volumeName)
} }
return time.Since(nodeObj.detachRequestedTime), nil return time.Since(nodeObj.detachRequestedTime), nil
} }
@ -488,10 +494,10 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
// Add the volumeName to the node's volumesToReportAsAttached list // Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached( func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) { logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest // In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil { if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
klog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName) logger.V(4).Info("Volume is no longer attached to node", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName)
return return
} }
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
@ -503,7 +509,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName), volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
} }
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
klog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName) logger.V(4).Info("Add new node to nodesToUpdateStatusFor", "node", klog.KRef("", string(nodeName)))
} }
_, nodeToUpdateVolumeExists := _, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName] nodeToUpdate.volumesToReportAsAttached[volumeName]
@ -511,7 +517,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
nodeToUpdate.statusUpdateNeeded = true nodeToUpdate.statusUpdateNeeded = true
nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
klog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName) logger.V(4).Info("Report volume as attached to node", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName)
} }
} }
@ -534,11 +540,11 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
return nil return nil
} }
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) { func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(logger klog.Logger, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil { if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil {
klog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err) logger.Info("Failed to update statusUpdateNeeded field in actual state of world", "err", err)
} }
} }
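
The hunks around here pick different severities for the same kind of failure: SetNodeStatusUpdateNeeded logs at info level with the error attached as a key/value pair, while GetVolumesToReportAttached below uses logger.Error. A short sketch of both forms, using a placeholder error:

    package main

    import (
        "errors"

        "k8s.io/klog/v2"
    )

    func main() {
        logger := klog.Background()
        err := errors.New("example failure") // placeholder, not a real controller error

        // Error severity: the error is the first argument.
        logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes")

        // Info severity with the error carried as a key/value pair, as in
        // SetNodeStatusUpdateNeeded above, where the failure is non-fatal.
        logger.Info("Failed to update statusUpdateNeeded field in actual state of world", "err", err)
    }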
@ -584,8 +590,8 @@ func (asw *actualStateOfWorld) GetAttachState(
} }
// SetVolumeClaimSize sets size of the volume. But this function should not be used from attach_detach controller. // SetVolumeClaimSize sets size of the volume. But this function should not be used from attach_detach controller.
func (asw *actualStateOfWorld) InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) { func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) {
klog.V(5).Infof("no-op InitializeClaimSize call in attach-detach controller.") logger.V(5).Info("no-op InitializeClaimSize call in attach-detach controller")
} }
func (asw *actualStateOfWorld) GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity { func (asw *actualStateOfWorld) GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity {
@ -663,7 +669,7 @@ func (asw *actualStateOfWorld) GetNodesForAttachedVolume(volumeName v1.UniqueVol
return nodes return nodes
} }
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume { func (asw *actualStateOfWorld) GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -676,14 +682,14 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating // of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated. // the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil { if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err) logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes")
} }
} }
return volumesToReportAttached return volumesToReportAttached
} }
func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(nodeName types.NodeName) (bool, []v1.AttachedVolume) { func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(logger klog.Logger, nodeName types.NodeName) (bool, []v1.AttachedVolume) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@ -700,7 +706,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(nodeName types.
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating // of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated. // the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil { if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err) logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes")
} }
return true, volumesToReportAttached return true, volumesToReportAttached
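
That is the last actual-state-of-world hunk in this excerpt. Because several exported methods now take a klog.Logger, callers that only hold a context derive one before calling them; the sketch below illustrates that hand-off against a hypothetical, narrowed interface rather than the real cache type.

    package sketch

    import (
        "context"

        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/types"
        "k8s.io/klog/v2"
    )

    // attachedVolumeReporter is a hypothetical, narrowed view of the actual
    // state of world, declared here only to keep the sketch self-contained.
    type attachedVolumeReporter interface {
        GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume
    }

    // reportAttached shows how a caller holding only a context obtains the
    // logger that the reworked cache methods now require.
    func reportAttached(ctx context.Context, asw attachedVolumeReporter) {
        logger := klog.FromContext(ctx)
        for nodeName, volumes := range asw.GetVolumesToReportAttached(logger) {
            logger.V(4).Info("Volumes to report as attached",
                "node", klog.KRef("", string(nodeName)),
                "count", len(volumes))
        }
    }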


@ -22,6 +22,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2/ktesting"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
volumeutil "k8s.io/kubernetes/pkg/volume/util" volumeutil "k8s.io/kubernetes/pkg/volume/util"
@ -40,7 +41,8 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert // Assert
if err != nil { if err != nil {
@ -75,7 +77,8 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T)
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, false) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, false)
// Assert // Assert
if err != nil { if err != nil {
@ -93,7 +96,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T)
} }
verifyAttachedVolume(t, allVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) verifyAttachedVolume(t, allVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
_, exists := reportAsAttachedVolumesMap[nodeName] _, exists := reportAsAttachedVolumesMap[nodeName]
if exists { if exists {
t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: <node %q exist> Expect: <node does not exist in the reportedAsAttached map", nodeName) t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: <node %q exist> Expect: <node does not exist in the reportedAsAttached map", nodeName)
@ -117,7 +120,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T)
} }
// Add the volume to the node second time with attached set to true // Add the volume to the node second time with attached set to true
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert // Assert
if add2Err != nil { if add2Err != nil {
@ -175,7 +178,8 @@ func Test_AddVolumeNode_Positive_NewVolumeTwoNodesWithFalseAttached(t *testing.T
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, false) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, false)
// Assert // Assert
if err != nil { if err != nil {
@ -187,7 +191,7 @@ func Test_AddVolumeNode_Positive_NewVolumeTwoNodesWithFalseAttached(t *testing.T
t.Fatalf("%q/%q volume/node combo is marked %q, expected 'Uncertain'.", generatedVolumeName, node1Name, volumeNodeComboState) t.Fatalf("%q/%q volume/node combo is marked %q, expected 'Uncertain'.", generatedVolumeName, node1Name, volumeNodeComboState)
} }
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true)
// Assert // Assert
if add2Err != nil { if add2Err != nil {
@ -230,7 +234,7 @@ func Test_AddVolumeNode_Positive_NewVolumeTwoNodesWithFalseAttached(t *testing.T
t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Expect one node returned.") t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Expect one node returned.")
} }
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
reportedVolumes, exists := reportAsAttachedVolumesMap[node2Name] reportedVolumes, exists := reportAsAttachedVolumesMap[node2Name]
if !exists || len(reportedVolumes) != 1 { if !exists || len(reportedVolumes) != 1 {
t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: <node %q exist> Expect: <node does not exist in the reportedAsAttached map", node2Name) t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: <node %q exist> Expect: <node does not exist in the reportedAsAttached map", node2Name)
@ -250,8 +254,9 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true) generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true)
// Assert // Assert
if add1Err != nil { if add1Err != nil {
@ -299,8 +304,9 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert // Assert
if add1Err != nil { if add1Err != nil {
@ -341,7 +347,8 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -398,11 +405,12 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
} }
@ -446,7 +454,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -479,7 +488,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -550,7 +560,8 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -577,14 +588,15 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Name, volume1Spec, node1Name, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volume1Name, volume1Spec, node1Name, devicePath, true)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
volume2Name := v1.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Name, volume2Spec, node2Name, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volume2Name, volume2Spec, node2Name, devicePath, true)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
} }
@ -620,12 +632,13 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
if err != nil || plugin == nil { if err != nil || plugin == nil {
t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err) t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err)
} }
generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath, true)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
} }
@ -659,7 +672,8 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -686,14 +700,15 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
// Act // Act
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert // Assert
if setVolumeMountedErr1 != nil { if setVolumeMountedErr1 != nil {
@ -722,7 +737,8 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -735,7 +751,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
// Act // Act
setVolumeMountedErr := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) setVolumeMountedErr := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert // Assert
if setVolumeMountedErr != nil { if setVolumeMountedErr != nil {
@ -762,15 +778,16 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
// Act // Act
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
generatedVolumeName, addErr = asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) generatedVolumeName, addErr = asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert // Assert
if setVolumeMountedErr1 != nil { if setVolumeMountedErr1 != nil {
@ -803,11 +820,12 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil { if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
} }
@ -818,8 +836,8 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
expectedDetachRequestedTime := asw.GetAttachedVolumes()[0].DetachRequestedTime expectedDetachRequestedTime := asw.GetAttachedVolumes()[0].DetachRequestedTime
// Act // Act
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert // Assert
if setVolumeMountedErr1 != nil { if setVolumeMountedErr1 != nil {
@ -850,7 +868,8 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) {
devicePath := "fake/device/path" devicePath := "fake/device/path"
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -877,13 +896,14 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
// Act // Act
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil { if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
} }
@ -913,19 +933,20 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
// Act // Act
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil { if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
} }
markDesireToDetachErr := asw.RemoveVolumeFromReportAsAttached(generatedVolumeName, nodeName) markDesireToDetachErr := asw.RemoveVolumeFromReportAsAttached(generatedVolumeName, nodeName)
// Reset detach request time to 0 // Reset detach request time to 0
asw.ResetDetachRequestTime(generatedVolumeName, nodeName) asw.ResetDetachRequestTime(logger, generatedVolumeName, nodeName)
// Assert // Assert
if markDesireToDetachErr != nil { if markDesireToDetachErr != nil {
@ -956,12 +977,13 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
if setVolumeMountedErr1 != nil { if setVolumeMountedErr1 != nil {
t.Fatalf("SetVolumeMountedByNode1 failed. Expected <no error> Actual: <%v>", setVolumeMountedErr1) t.Fatalf("SetVolumeMountedByNode1 failed. Expected <no error> Actual: <%v>", setVolumeMountedErr1)
} }
@ -970,7 +992,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
} }
// Act // Act
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil { if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
} }
@ -999,7 +1021,8 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -1009,7 +1032,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr) t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
} }
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
volumes, exists := reportAsAttachedVolumesMap[nodeName] volumes, exists := reportAsAttachedVolumesMap[nodeName]
if !exists { if !exists {
t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName) t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1032,7 +1055,8 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -1042,7 +1066,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr) t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
} }
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
volumes, exists := reportAsAttachedVolumesMap[nodeName] volumes, exists := reportAsAttachedVolumesMap[nodeName]
if !exists { if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName) t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1051,8 +1075,8 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes)) t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes))
} }
asw.AddVolumeToReportAsAttached(generatedVolumeName, nodeName) asw.AddVolumeToReportAsAttached(logger, generatedVolumeName, nodeName)
reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached(logger)
volumes, exists = reportAsAttachedVolumesMap[nodeName] volumes, exists = reportAsAttachedVolumesMap[nodeName]
if !exists { if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName) t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1075,7 +1099,8 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -1085,7 +1110,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr) t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
} }
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
volumes, exists := reportAsAttachedVolumesMap[nodeName] volumes, exists := reportAsAttachedVolumesMap[nodeName]
if !exists { if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: <node %q exists> Actual: <node does not exist in the reportedAsAttached map", nodeName) t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: <node %q exists> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1096,9 +1121,9 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
asw.DeleteVolumeNode(generatedVolumeName, nodeName) asw.DeleteVolumeNode(generatedVolumeName, nodeName)
asw.AddVolumeNode(volumeName, volumeSpec, nodeName, "" /*device path*/, true) asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, "" /*device path*/, true)
reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached() reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached(logger)
volumes, exists = reportAsAttachedVolumesMap[nodeName] volumes, exists = reportAsAttachedVolumesMap[nodeName]
if !exists { if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: <node %q exists> Actual: <node does not exist in the reportedAsAttached map", nodeName) t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: <node %q exists> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1120,13 +1145,14 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
maxWaitTime := 1 * time.Second maxWaitTime := 1 * time.Second
etime, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) etime, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil { if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
} }
@ -1135,7 +1161,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
} }
// Sleep and call SetDetachRequestTime again // Sleep and call SetDetachRequestTime again
time.Sleep(maxWaitTime) time.Sleep(maxWaitTime)
etime, err = asw.SetDetachRequestTime(generatedVolumeName, nodeName) etime, err = asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil { if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
} }
@ -1167,7 +1193,8 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil { if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
} }
@ -1191,14 +1218,15 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
_, add1Err := asw.AddVolumeNode(volume1Name, volume1Spec, node1Name, devicePath, true) logger, _ := ktesting.NewTestContext(t)
_, add1Err := asw.AddVolumeNode(logger, volume1Name, volume1Spec, node1Name, devicePath, true)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
volume2Name := v1.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Name, volume2Spec, node2Name, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volume2Name, volume2Spec, node2Name, devicePath, true)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
} }
@ -1222,6 +1250,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
logger, _ := ktesting.NewTestContext(t)
plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil { if err != nil || plugin == nil {
t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err) t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err)
@ -1230,12 +1259,12 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
if err != nil || plugin == nil { if err != nil || plugin == nil {
t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err) t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err)
} }
generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath, true) generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath, true)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
} }
@ -1266,6 +1295,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath1 := "fake/device/path1" devicePath1 := "fake/device/path1"
logger, _ := ktesting.NewTestContext(t)
plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil { if err != nil || plugin == nil {
t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err) t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err)
@ -1274,13 +1304,13 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
if err != nil || plugin == nil { if err != nil || plugin == nil {
t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err) t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err)
} }
generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath1, true) generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath1, true)
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
devicePath2 := "fake/device/path2" devicePath2 := "fake/device/path2"
generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath2, true) generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath2, true)
if add2Err != nil { if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
} }
@ -1313,7 +1343,8 @@ func Test_SetNodeStatusUpdateNeededError(t *testing.T) {
nodeName := types.NodeName("node-1") nodeName := types.NodeName("node-1")
// Act // Act
asw.SetNodeStatusUpdateNeeded(nodeName) logger, _ := ktesting.NewTestContext(t)
asw.SetNodeStatusUpdateNeeded(logger, nodeName)
// Assert // Assert
nodesToUpdateStatusFor := asw.GetNodesToUpdateStatusFor() nodesToUpdateStatusFor := asw.GetNodesToUpdateStatusFor()
@ -1393,7 +1424,8 @@ func Test_MarkVolumeAsAttached(t *testing.T) {
} }
// Act // Act
err = asw.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath) logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, devicePath)
// Assert // Assert
if err != nil { if err != nil {
@ -1429,7 +1461,8 @@ func Test_MarkVolumeAsUncertain(t *testing.T) {
} }
// Act // Act
err = asw.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName) logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsUncertain(logger, volumeName, volumeSpec, nodeName)
// Assert // Assert
if err != nil { if err != nil {
@ -1464,14 +1497,15 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) {
devicePath := "fake/device/path" devicePath := "fake/device/path"
// Act // Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert // Assert
if err != nil { if err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
} }
needsUpdate, attachedVolumes := asw.GetVolumesToReportAttachedForNode(nodeName) needsUpdate, attachedVolumes := asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if !needsUpdate { if !needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q does not need an update> Expect: <node exists in the reportedAsAttached map and needs an update", nodeName) t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q does not need an update> Expect: <node exists in the reportedAsAttached map and needs an update", nodeName)
} }
@ -1479,7 +1513,7 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) {
t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes)) t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
} }
needsUpdate, _ = asw.GetVolumesToReportAttachedForNode(nodeName) needsUpdate, _ = asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if needsUpdate { if needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q needs an update> Expect: <node exists in the reportedAsAttached map and does not need an update", nodeName) t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q needs an update> Expect: <node exists in the reportedAsAttached map and does not need an update", nodeName)
} }
@ -1489,7 +1523,7 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) {
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr) t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
} }
needsUpdate, attachedVolumes = asw.GetVolumesToReportAttachedForNode(nodeName) needsUpdate, attachedVolumes = asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if !needsUpdate { if !needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q does not need an update> Expect: <node exists in the reportedAsAttached map and needs an update", nodeName) t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q does not need an update> Expect: <node exists in the reportedAsAttached map and needs an update", nodeName)
} }
@ -1504,8 +1538,8 @@ func Test_GetVolumesToReportAttachedForNode_UnknownNode(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
logger, _ := ktesting.NewTestContext(t)
needsUpdate, _ := asw.GetVolumesToReportAttachedForNode(nodeName) needsUpdate, _ := asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if needsUpdate { if needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_UnknownNode failed. Actual: <node %q needs an update> Expect: <node does not exist in the reportedAsAttached map and does not need an update", nodeName) t.Fatalf("GetVolumesToReportAttachedForNode_UnknownNode failed. Actual: <node %q needs an update> Expect: <node does not exist in the reportedAsAttached map and does not need an update", nodeName)
} }
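
The test updates above all follow one recipe: build a per-test logger with ktesting and pass it explicitly to the methods under test. A stripped-down sketch of that recipe, using a hypothetical helper in place of the real cache so it stays self-contained:

    package sketch

    import (
        "testing"

        "k8s.io/klog/v2"
        "k8s.io/klog/v2/ktesting"
    )

    // addVolumeForTest is a hypothetical stand-in for asw.AddVolumeNode.
    func addVolumeForTest(logger klog.Logger, volumeName string) error {
        logger.V(4).Info("Adding volume", "volumeName", volumeName)
        return nil
    }

    func TestContextualLoggingPattern(t *testing.T) {
        // ktesting.NewTestContext returns a logger wired to t.Log plus a
        // matching context; most of the tests above only need the logger.
        logger, _ := ktesting.NewTestContext(t)
        if err := addVolumeForTest(logger, "volume-name"); err != nil {
            t.Fatalf("addVolumeForTest failed. Expected: <no error> Actual: <%v>", err)
        }
    }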


@ -17,6 +17,7 @@ limitations under the License.
package metrics package metrics
import ( import (
"errors"
"sync" "sync"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
@ -129,7 +130,7 @@ func (collector *attachDetachStateCollector) DescribeWithStability(ch chan<- *me
} }
func (collector *attachDetachStateCollector) CollectWithStability(ch chan<- metrics.Metric) { func (collector *attachDetachStateCollector) CollectWithStability(ch chan<- metrics.Metric) {
nodeVolumeMap := collector.getVolumeInUseCount() nodeVolumeMap := collector.getVolumeInUseCount(klog.TODO())
for nodeName, pluginCount := range nodeVolumeMap { for nodeName, pluginCount := range nodeVolumeMap {
for pluginName, count := range pluginCount { for pluginName, count := range pluginCount {
ch <- metrics.NewLazyConstMetric(inUseVolumeMetricDesc, ch <- metrics.NewLazyConstMetric(inUseVolumeMetricDesc,
@ -152,10 +153,10 @@ func (collector *attachDetachStateCollector) CollectWithStability(ch chan<- metr
} }
} }
func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount { func (collector *attachDetachStateCollector) getVolumeInUseCount(logger klog.Logger) volumeCount {
pods, err := collector.podLister.List(labels.Everything()) pods, err := collector.podLister.List(labels.Everything())
if err != nil { if err != nil {
klog.Errorf("Error getting pod list") logger.Error(errors.New("Error getting pod list"), "Get pod list failed")
return nil return nil
} }
@ -169,7 +170,7 @@ func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
continue continue
} }
for _, podVolume := range pod.Spec.Volumes { for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := util.CreateVolumeSpec(podVolume, pod, types.NodeName(pod.Spec.NodeName), collector.volumePluginMgr, collector.pvcLister, collector.pvLister, collector.csiMigratedPluginManager, collector.intreeToCSITranslator) volumeSpec, err := util.CreateVolumeSpec(logger, podVolume, pod, types.NodeName(pod.Spec.NodeName), collector.volumePluginMgr, collector.pvcLister, collector.pvLister, collector.csiMigratedPluginManager, collector.intreeToCSITranslator)
if err != nil { if err != nil {
continue continue
} }
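
CollectWithStability implements the metrics collector interface and has no context parameter, so the commit passes klog.TODO() as a stop-gap logger to getVolumeInUseCount. A tiny illustration of that fallback, with countSomething standing in as a hypothetical helper:

    package main

    import "k8s.io/klog/v2"

    // countSomething is a hypothetical helper that takes a logger instead of
    // calling global klog functions.
    func countSomething(logger klog.Logger) int {
        logger.V(4).Info("Counting volumes in use")
        return 0
    }

    func main() {
        // No context reaches this call site yet; klog.TODO() supplies a usable
        // logger and marks the spot for a future contextual one.
        _ = countSomething(klog.TODO())
    }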


@ -27,6 +27,7 @@ import (
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@ -121,7 +122,8 @@ func TestVolumesInUseMetricCollection(t *testing.T) {
fakeVolumePluginMgr, fakeVolumePluginMgr,
csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate), csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate),
csiTranslator) csiTranslator)
nodeUseMap := metricCollector.getVolumeInUseCount() logger, _ := ktesting.NewTestContext(t)
nodeUseMap := metricCollector.getVolumeInUseCount(logger)
if len(nodeUseMap) < 1 { if len(nodeUseMap) < 1 {
t.Errorf("Expected one volume in use got %d", len(nodeUseMap)) t.Errorf("Expected one volume in use got %d", len(nodeUseMap))
} }
@ -150,7 +152,8 @@ func TestTotalVolumesMetricCollection(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Expected no error, got %v", err) t.Fatalf("Expected no error, got %v", err)
} }
asw.AddVolumeNode(volumeName, volumeSpec, nodeName, "", true) logger, _ := ktesting.NewTestContext(t)
asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, "", true)
csiTranslator := csitrans.New() csiTranslator := csitrans.New()
metricCollector := newAttachDetachStateCollector( metricCollector := newAttachDetachStateCollector(


@ -19,6 +19,7 @@ limitations under the License.
package populator package populator
import ( import (
"context"
"fmt" "fmt"
"time" "time"
@ -43,7 +44,7 @@ import (
// each one exists in the desired state of the world cache // each one exists in the desired state of the world cache
// if it has volumes. // if it has volumes.
type DesiredStateOfWorldPopulator interface { type DesiredStateOfWorldPopulator interface {
Run(stopCh <-chan struct{}) Run(ctx context.Context)
} }
// NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator. // NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator.
@ -90,35 +91,36 @@ type desiredStateOfWorldPopulator struct {
intreeToCSITranslator csimigration.InTreeToCSITranslator intreeToCSITranslator csimigration.InTreeToCSITranslator
} }
func (dswp *desiredStateOfWorldPopulator) Run(stopCh <-chan struct{}) { func (dswp *desiredStateOfWorldPopulator) Run(ctx context.Context) {
wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh) wait.UntilWithContext(ctx, dswp.populatorLoopFunc(ctx), dswp.loopSleepDuration)
} }
func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() { func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc(ctx context.Context) func(ctx context.Context) {
return func() { return func(ctx context.Context) {
dswp.findAndRemoveDeletedPods() logger := klog.FromContext(ctx)
dswp.findAndRemoveDeletedPods(logger)
// findAndAddActivePods is called periodically, independently of the main // findAndAddActivePods is called periodically, independently of the main
// populator loop. // populator loop.
if time.Since(dswp.timeOfLastListPods) < dswp.listPodsRetryDuration { if time.Since(dswp.timeOfLastListPods) < dswp.listPodsRetryDuration {
klog.V(5).Infof( logger.V(5).Info(
"Skipping findAndAddActivePods(). Not permitted until %v (listPodsRetryDuration %v).", "Skipping findAndAddActivePods(). Not permitted until the retry time is reached",
dswp.timeOfLastListPods.Add(dswp.listPodsRetryDuration), "retryTime", dswp.timeOfLastListPods.Add(dswp.listPodsRetryDuration),
dswp.listPodsRetryDuration) "retryDuration", dswp.listPodsRetryDuration)
return return
} }
dswp.findAndAddActivePods() dswp.findAndAddActivePods(logger)
} }
} }
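The hunk above replaces the stop-channel plumbing with a context and derives the logger from it. A minimal, self-contained sketch of that pattern; the loopRunner type and its field names are placeholders, not taken from this diff:

package sketch

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// loopRunner is a hypothetical stand-in for the populator/reconciler types in
// this diff; only the Run signature matters for the pattern.
type loopRunner struct {
	loopPeriod time.Duration
}

// Run shows the stop-channel to context migration: wait.UntilWithContext
// replaces wait.Until, and the logger is derived from the context with
// klog.FromContext instead of calling the global klog functions.
func (r *loopRunner) Run(ctx context.Context) {
	wait.UntilWithContext(ctx, func(ctx context.Context) {
		logger := klog.FromContext(ctx)
		logger.V(5).Info("One loop iteration")
	}, r.loopPeriod)
}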
// Iterate through all pods in desired state of world, and remove if they no // Iterate through all pods in desired state of world, and remove if they no
// longer exist in the informer // longer exist in the informer
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods(logger klog.Logger) {
for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() { for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() {
dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod) dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod)
if err != nil { if err != nil {
klog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err) logger.Error(err, "MetaNamespaceKeyFunc failed for pod", "podName", dswPodKey, "podUID", dswPodUID)
continue continue
} }
@ -133,7 +135,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
case errors.IsNotFound(err): case errors.IsNotFound(err):
// if we can't find the pod, we need to delete it below // if we can't find the pod, we need to delete it below
case err != nil: case err != nil:
klog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err) logger.Error(err, "podLister Get failed for pod", "podName", dswPodKey, "podUID", dswPodUID)
continue continue
default: default:
volumeActionFlag := util.DetermineVolumeAction( volumeActionFlag := util.DetermineVolumeAction(
@ -145,7 +147,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
informerPodUID := volutil.GetUniquePodName(informerPod) informerPodUID := volutil.GetUniquePodName(informerPod)
// Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer // Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
if informerPodUID == dswPodUID { if informerPodUID == dswPodUID {
klog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID) logger.V(10).Info("Verified podfrom dsw exists in pod informer", "podName", dswPodKey, "podUID", dswPodUID)
continue continue
} }
} }
@ -153,7 +155,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
// the pod from dsw does not exist in pod informer, or it does not match the unique identifier retrieved // the pod from dsw does not exist in pod informer, or it does not match the unique identifier retrieved
// from the informer, delete it from dsw // from the informer, delete it from dsw
klog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID) logger.V(1).Info("Removing pod from dsw because it does not exist in pod informer", "podName", dswPodKey, "podUID", dswPodUID)
dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName) dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
} }
@ -163,21 +165,21 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
// The result is returned from CSIDriverLister which is from local cache. So this is not an expensive call. // The result is returned from CSIDriverLister which is from local cache. So this is not an expensive call.
volumeAttachable := volutil.IsAttachableVolume(volumeToAttach.VolumeSpec, dswp.volumePluginMgr) volumeAttachable := volutil.IsAttachableVolume(volumeToAttach.VolumeSpec, dswp.volumePluginMgr)
if !volumeAttachable { if !volumeAttachable {
klog.Infof("Volume %v changes from attachable to non-attachable.", volumeToAttach.VolumeName) logger.Info("Volume changes from attachable to non-attachable", "volumeName", volumeToAttach.VolumeName)
for _, scheduledPod := range volumeToAttach.ScheduledPods { for _, scheduledPod := range volumeToAttach.ScheduledPods {
podUID := volutil.GetUniquePodName(scheduledPod) podUID := volutil.GetUniquePodName(scheduledPod)
dswp.desiredStateOfWorld.DeletePod(podUID, volumeToAttach.VolumeName, volumeToAttach.NodeName) dswp.desiredStateOfWorld.DeletePod(podUID, volumeToAttach.VolumeName, volumeToAttach.NodeName)
klog.V(4).Infof("Removing podUID: %v, volume: %v on node: %v from desired state of world"+ logger.V(4).Info("Removing podUID and volume on node from desired state of world"+
" because of the change of volume attachability.", podUID, volumeToAttach.VolumeName, volumeToAttach.NodeName) " because of the change of volume attachability", "node", klog.KRef("", string(volumeToAttach.NodeName)), "podUID", podUID, "volumeName", volumeToAttach.VolumeName)
} }
} }
} }
} }
func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() { func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods(logger klog.Logger) {
pods, err := dswp.podLister.List(labels.Everything()) pods, err := dswp.podLister.List(labels.Everything())
if err != nil { if err != nil {
klog.Errorf("podLister List failed: %v", err) logger.Error(err, "PodLister List failed")
return return
} }
dswp.timeOfLastListPods = time.Now() dswp.timeOfLastListPods = time.Now()
@ -187,7 +189,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() {
// Do not add volumes for terminated pods // Do not add volumes for terminated pods
continue continue
} }
util.ProcessPodVolumes(pod, true, util.ProcessPodVolumes(logger, pod, true,
dswp.desiredStateOfWorld, dswp.volumePluginMgr, dswp.pvcLister, dswp.pvLister, dswp.csiMigratedPluginManager, dswp.intreeToCSITranslator) dswp.desiredStateOfWorld, dswp.volumePluginMgr, dswp.pvcLister, dswp.pvLister, dswp.csiMigratedPluginManager, dswp.intreeToCSITranslator)
} }

View File

@ -27,6 +27,7 @@ import (
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/csimigration"
@ -91,8 +92,8 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
//add the given node to the list of nodes managed by dsw //add the given node to the list of nodes managed by dsw
dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/) dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/)
logger, _ := ktesting.NewTestContext(t)
dswp.findAndAddActivePods() dswp.findAndAddActivePods(logger)
expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName) expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)
@ -118,7 +119,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
} }
//add pod and volume again //add pod and volume again
dswp.findAndAddActivePods() dswp.findAndAddActivePods(logger)
//check if the given volume referenced by the pod is added to dsw for the second time //check if the given volume referenced by the pod is added to dsw for the second time
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName)) volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
@ -130,7 +131,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
} }
fakePodInformer.Informer().GetStore().Delete(pod) fakePodInformer.Informer().GetStore().Delete(pod)
dswp.findAndRemoveDeletedPods() dswp.findAndRemoveDeletedPods(logger)
//check if the given volume referenced by the pod still exists in dsw //check if the given volume referenced by the pod still exists in dsw
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName)) volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if volumeExists { if volumeExists {
@ -196,8 +197,8 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
//add the given node to the list of nodes managed by dsw //add the given node to the list of nodes managed by dsw
dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/) dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/)
logger, _ := ktesting.NewTestContext(t)
dswp.findAndAddActivePods() dswp.findAndAddActivePods(logger)
expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName) expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)
@ -213,7 +214,7 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
// Change the CSI volume plugin attachability // Change the CSI volume plugin attachability
fakeVolumePlugin.NonAttachable = true fakeVolumePlugin.NonAttachable = true
dswp.findAndRemoveDeletedPods() dswp.findAndRemoveDeletedPods(logger)
// The volume should not exist after it becomes non-attachable // The volume should not exist after it becomes non-attachable
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName)) volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))

View File

@ -20,6 +20,7 @@ limitations under the License.
package reconciler package reconciler
import ( import (
"context"
"fmt" "fmt"
"strings" "strings"
"time" "time"
@ -53,7 +54,7 @@ type Reconciler interface {
// if volumes that should be attached are attached and volumes that should // if volumes that should be attached are attached and volumes that should
// be detached are detached. If not, it will trigger attach/detach // be detached are detached. If not, it will trigger attach/detach
// operations to rectify. // operations to rectify.
Run(stopCh <-chan struct{}) Run(ctx context.Context)
} }
// NewReconciler returns a new instance of Reconciler that waits loopPeriod // NewReconciler returns a new instance of Reconciler that waits loopPeriod
@ -105,24 +106,24 @@ type reconciler struct {
recorder record.EventRecorder recorder record.EventRecorder
} }
func (rc *reconciler) Run(stopCh <-chan struct{}) { func (rc *reconciler) Run(ctx context.Context) {
wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh) wait.UntilWithContext(ctx, rc.reconciliationLoopFunc(ctx), rc.loopPeriod)
} }
// reconciliationLoopFunc this can be disabled via cli option disableReconciliation. // reconciliationLoopFunc this can be disabled via cli option disableReconciliation.
// It periodically checks whether the attached volumes from actual state // It periodically checks whether the attached volumes from actual state
// are still attached to the node and update the status if they are not. // are still attached to the node and update the status if they are not.
func (rc *reconciler) reconciliationLoopFunc() func() { func (rc *reconciler) reconciliationLoopFunc(ctx context.Context) func(context.Context) {
return func() { return func(ctx context.Context) {
rc.reconcile()
rc.reconcile(ctx)
logger := klog.FromContext(ctx)
if rc.disableReconciliationSync { if rc.disableReconciliationSync {
klog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.") logger.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line")
} else if rc.syncDuration < time.Second { } else if rc.syncDuration < time.Second {
klog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.") logger.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line")
} else if time.Since(rc.timeOfLastSync) > rc.syncDuration { } else if time.Since(rc.timeOfLastSync) > rc.syncDuration {
klog.V(5).Info("Starting reconciling attached volumes still attached") logger.V(5).Info("Starting reconciling attached volumes still attached")
rc.sync() rc.sync()
} }
} }
@ -164,11 +165,12 @@ func (rc *reconciler) nodeIsHealthy(nodeName types.NodeName) (bool, error) {
return nodeutil.IsNodeReady(node), nil return nodeutil.IsNodeReady(node), nil
} }
func (rc *reconciler) reconcile() { func (rc *reconciler) reconcile(ctx context.Context) {
// Detaches are triggered before attaches so that volumes referenced by // Detaches are triggered before attaches so that volumes referenced by
// pods that are rescheduled to a different node are detached first. // pods that are rescheduled to a different node are detached first.
// Ensure volumes that should be detached are detached. // Ensure volumes that should be detached are detached.
logger := klog.FromContext(ctx)
for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() { for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
if !rc.desiredStateOfWorld.VolumeExists( if !rc.desiredStateOfWorld.VolumeExists(
attachedVolume.VolumeName, attachedVolume.NodeName) { attachedVolume.VolumeName, attachedVolume.NodeName) {
@ -182,12 +184,12 @@ func (rc *reconciler) reconcile() {
// allows multi attach across different nodes. // allows multi attach across different nodes.
if util.IsMultiAttachAllowed(attachedVolume.VolumeSpec) { if util.IsMultiAttachAllowed(attachedVolume.VolumeSpec) {
if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, attachedVolume.NodeName, operationexecutor.DetachOperationName) { if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, attachedVolume.NodeName, operationexecutor.DetachOperationName) {
klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff for node %q. Can't start detach", attachedVolume.VolumeName, attachedVolume.NodeName) logger.V(10).Info("Operation for volume is already running or still in exponential backoff for node. Can't start detach", "node", klog.KRef("", string(attachedVolume.NodeName)), "volumeName", attachedVolume.VolumeName)
continue continue
} }
} else { } else {
if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, "" /* nodeName */, operationexecutor.DetachOperationName) { if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, "" /* nodeName */, operationexecutor.DetachOperationName) {
klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff in the cluster. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName) logger.V(10).Info("Operation for volume is already running or still in exponential backoff in the cluster. Can't start detach for node", "node", klog.KRef("", string(attachedVolume.NodeName)), "volumeName", attachedVolume.VolumeName)
continue continue
} }
} }
@ -201,14 +203,14 @@ func (rc *reconciler) reconcile() {
// See https://github.com/kubernetes/kubernetes/issues/93902 // See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName) attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
if attachState == cache.AttachStateDetached { if attachState == cache.AttachStateDetached {
klog.V(5).InfoS("Volume detached--skipping", "volume", attachedVolume) logger.V(5).Info("Volume detached--skipping", "volume", attachedVolume)
continue continue
} }
// Set the detach request time // Set the detach request time
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName) elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(logger, attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil { if err != nil {
klog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err) logger.Error(err, "Cannot trigger detach because it fails to set detach request time with error")
continue continue
} }
// Check whether timeout has reached the maximum waiting time // Check whether timeout has reached the maximum waiting time
@ -216,7 +218,7 @@ func (rc *reconciler) reconcile() {
isHealthy, err := rc.nodeIsHealthy(attachedVolume.NodeName) isHealthy, err := rc.nodeIsHealthy(attachedVolume.NodeName)
if err != nil { if err != nil {
klog.Errorf("failed to get health of node %s: %s", attachedVolume.NodeName, err.Error()) logger.Error(err, "Failed to get health of node", "node", klog.KRef("", string(attachedVolume.NodeName)))
} }
// Force detach volumes from unhealthy nodes after maxWaitForUnmountDuration. // Force detach volumes from unhealthy nodes after maxWaitForUnmountDuration.
@ -224,13 +226,13 @@ func (rc *reconciler) reconcile() {
hasOutOfServiceTaint, err := rc.hasOutOfServiceTaint(attachedVolume.NodeName) hasOutOfServiceTaint, err := rc.hasOutOfServiceTaint(attachedVolume.NodeName)
if err != nil { if err != nil {
klog.Errorf("failed to get taint specs for node %s: %s", attachedVolume.NodeName, err.Error()) logger.Error(err, "Failed to get taint specs for node", "node", klog.KRef("", string(attachedVolume.NodeName)))
} }
// Check whether volume is still mounted. Skip detach if it is still mounted unless force detach timeout // Check whether volume is still mounted. Skip detach if it is still mounted unless force detach timeout
// or the node has `node.kubernetes.io/out-of-service` taint. // or the node has `node.kubernetes.io/out-of-service` taint.
if attachedVolume.MountedByNode && !forceDetach && !hasOutOfServiceTaint { if attachedVolume.MountedByNode && !forceDetach && !hasOutOfServiceTaint {
klog.V(5).InfoS("Cannot detach volume because it is still mounted", "volume", attachedVolume) logger.V(5).Info("Cannot detach volume because it is still mounted", "volume", attachedVolume)
continue continue
} }
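The error-logging conversions in the hunk above follow one recipe: a constant message, key/value pairs, and klog.KRef for node names. A small illustrative helper, assuming nothing beyond the klog and apimachinery APIs already used in the diff (reportNodeHealthError itself is hypothetical):

package sketch

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

// reportNodeHealthError illustrates the conversion applied throughout this
// hunk: klog.Errorf format strings become a constant message plus key/value
// pairs, and node names are wrapped in klog.KRef so they render like other
// object references.
func reportNodeHealthError(logger klog.Logger, nodeName types.NodeName, err error) {
	logger.Error(err, "Failed to get health of node", "node", klog.KRef("", string(nodeName)))
}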
@ -240,77 +242,77 @@ func (rc *reconciler) reconcile() {
// has the correct volume attachment information. // has the correct volume attachment information.
err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName) err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil { if err != nil {
klog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v", logger.V(5).Info("RemoveVolumeFromReportAsAttached failed while removing volume from node",
attachedVolume.VolumeName, "node", klog.KRef("", string(attachedVolume.NodeName)),
attachedVolume.NodeName, "volumeName", attachedVolume.VolumeName,
err) "err", err)
} }
// Update Node Status to indicate volume is no longer safe to mount. // Update Node Status to indicate volume is no longer safe to mount.
err = rc.nodeStatusUpdater.UpdateNodeStatusForNode(attachedVolume.NodeName) err = rc.nodeStatusUpdater.UpdateNodeStatusForNode(logger, attachedVolume.NodeName)
if err != nil { if err != nil {
// Skip detaching this volume if unable to update node status // Skip detaching this volume if unable to update node status
klog.ErrorS(err, "UpdateNodeStatusForNode failed while attempting to report volume as attached", "volume", attachedVolume) logger.Error(err, "UpdateNodeStatusForNode failed while attempting to report volume as attached", "volume", attachedVolume)
// Add volume back to ReportAsAttached if UpdateNodeStatusForNode call failed so that node status updater will add it back to VolumeAttached list. // Add volume back to ReportAsAttached if UpdateNodeStatusForNode call failed so that node status updater will add it back to VolumeAttached list.
// It is needed here too because DetachVolume is not actually called and we keep the data consistent for every reconcile. // It is needed here too because DetachVolume is not actually called and we keep the data consistent for every reconcile.
rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName) rc.actualStateOfWorld.AddVolumeToReportAsAttached(logger, attachedVolume.VolumeName, attachedVolume.NodeName)
continue continue
} }
// Trigger detach volume which requires verifying safe to detach step // Trigger detach volume which requires verifying safe to detach step
// If timeout is true, skip verifySafeToDetach check // If timeout is true, skip verifySafeToDetach check
// If the node has node.kubernetes.io/out-of-service taint with NoExecute effect, skip verifySafeToDetach check // If the node has node.kubernetes.io/out-of-service taint with NoExecute effect, skip verifySafeToDetach check
klog.V(5).InfoS("Starting attacherDetacher.DetachVolume", "volume", attachedVolume) logger.V(5).Info("Starting attacherDetacher.DetachVolume", "volume", attachedVolume)
if hasOutOfServiceTaint { if hasOutOfServiceTaint {
klog.V(4).Infof("node %q has out-of-service taint", attachedVolume.NodeName) logger.V(4).Info("node has out-of-service taint", "node", klog.KRef("", string(attachedVolume.NodeName)))
} }
verifySafeToDetach := !(timeout || hasOutOfServiceTaint) verifySafeToDetach := !(timeout || hasOutOfServiceTaint)
err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld) err = rc.attacherDetacher.DetachVolume(logger, attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
if err == nil { if err == nil {
if !timeout { if !timeout {
klog.InfoS("attacherDetacher.DetachVolume started", "volume", attachedVolume) logger.Info("attacherDetacher.DetachVolume started", "volume", attachedVolume)
} else { } else {
metrics.RecordForcedDetachMetric() metrics.RecordForcedDetachMetric()
klog.InfoS("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume) logger.Info("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume)
} }
} }
if err != nil { if err != nil {
// Add volume back to ReportAsAttached if DetachVolume call failed so that node status updater will add it back to VolumeAttached list. // Add volume back to ReportAsAttached if DetachVolume call failed so that node status updater will add it back to VolumeAttached list.
// This function is also called while executing the volume detach operation in operation_generator. // This function is also called while executing the volume detach operation in operation_generator.
// It is needed here too because DetachVolume call might fail before executing the actual operation in operation_executor (e.g., cannot find volume plugin etc.) // It is needed here too because DetachVolume call might fail before executing the actual operation in operation_executor (e.g., cannot find volume plugin etc.)
rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName) rc.actualStateOfWorld.AddVolumeToReportAsAttached(logger, attachedVolume.VolumeName, attachedVolume.NodeName)
if !exponentialbackoff.IsExponentialBackoff(err) { if !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected. // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors. // Log all other errors.
klog.ErrorS(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume) logger.Error(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume)
} }
} }
} }
} }
rc.attachDesiredVolumes() rc.attachDesiredVolumes(logger)
// Update Node Status // Update Node Status
err := rc.nodeStatusUpdater.UpdateNodeStatuses() err := rc.nodeStatusUpdater.UpdateNodeStatuses(logger)
if err != nil { if err != nil {
klog.Warningf("UpdateNodeStatuses failed with: %v", err) logger.Info("UpdateNodeStatuses failed", "err", err)
} }
} }
func (rc *reconciler) attachDesiredVolumes() { func (rc *reconciler) attachDesiredVolumes(logger klog.Logger) {
// Ensure volumes that should be attached are attached. // Ensure volumes that should be attached are attached.
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() { for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) { if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node. // Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) { if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
klog.V(10).Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName) logger.V(10).Info("Operation for volume is already running for node. Can't start attach", "node", klog.KRef("", string(volumeToAttach.NodeName)), "volumeName", volumeToAttach.VolumeName)
continue continue
} }
} else { } else {
// Don't even try to start an operation if there is already one running for the given volume // Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) { if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
klog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName) logger.V(10).Info("Operation for volume is already running. Can't start attach for node", "node", klog.KRef("", string(volumeToAttach.NodeName)), "volumeNames", volumeToAttach.VolumeName)
continue continue
} }
} }
@ -323,8 +325,8 @@ func (rc *reconciler) attachDesiredVolumes() {
attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName) attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
if attachState == cache.AttachStateAttached { if attachState == cache.AttachStateAttached {
// Volume/Node exists, touch it to reset detachRequestedTime // Volume/Node exists, touch it to reset detachRequestedTime
klog.V(10).InfoS("Volume attached--touching", "volume", volumeToAttach) logger.V(10).Info("Volume attached--touching", "volume", volumeToAttach)
rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName) rc.actualStateOfWorld.ResetDetachRequestTime(logger, volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue continue
} }
@ -332,7 +334,7 @@ func (rc *reconciler) attachDesiredVolumes() {
nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName) nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName)
if len(nodes) > 0 { if len(nodes) > 0 {
if !volumeToAttach.MultiAttachErrorReported { if !volumeToAttach.MultiAttachErrorReported {
rc.reportMultiAttachError(volumeToAttach, nodes) rc.reportMultiAttachError(logger, volumeToAttach, nodes)
rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName) rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName)
} }
continue continue
@ -340,22 +342,22 @@ func (rc *reconciler) attachDesiredVolumes() {
} }
// Volume/Node doesn't exist, spawn a goroutine to attach it // Volume/Node doesn't exist, spawn a goroutine to attach it
klog.V(5).InfoS("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach) logger.V(5).Info("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach)
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld) err := rc.attacherDetacher.AttachVolume(logger, volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil { if err == nil {
klog.InfoS("attacherDetacher.AttachVolume started", "volume", volumeToAttach) logger.Info("attacherDetacher.AttachVolume started", "volume", volumeToAttach)
} }
if err != nil && !exponentialbackoff.IsExponentialBackoff(err) { if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected. // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors. // Log all other errors.
klog.ErrorS(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach) logger.Error(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach)
} }
} }
} }
// reportMultiAttachError sends events and logs situation that a volume that // reportMultiAttachError sends events and logs situation that a volume that
// should be attached to a node is already attached to different node(s). // should be attached to a node is already attached to different node(s).
func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) { func (rc *reconciler) reportMultiAttachError(logger klog.Logger, volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) {
// Filter out the current node from list of nodes where the volume is // Filter out the current node from list of nodes where the volume is
// attached. // attached.
// Some methods need []string, some other needs []NodeName, collect both. // Some methods need []string, some other needs []NodeName, collect both.
@ -373,7 +375,6 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
// Get list of pods that use the volume on the other nodes. // Get list of pods that use the volume on the other nodes.
pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName) pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName)
if len(pods) == 0 { if len(pods) == 0 {
// We did not find any pods that requests the volume. The pod must have been deleted already. // We did not find any pods that requests the volume. The pod must have been deleted already.
simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another") simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another")
@ -381,7 +382,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg) rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
} }
// Log detailed message to system admin // Log detailed message to system admin
klog.InfoS("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach) logger.Info("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach)
return return
} }
@ -417,5 +418,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
} }
// Log all pods for system admin // Log all pods for system admin
klog.InfoS("Multi-Attach error: volume is already used by pods", "pods", klog.KObjSlice(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach) logger.Info("Multi-Attach error: volume is already used by pods", "pods", klog.KObjSlice(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach)
} }

View File

@ -17,6 +17,7 @@ limitations under the License.
package reconciler package reconciler
import ( import (
"context"
"testing" "testing"
"time" "time"
@ -28,6 +29,8 @@ import (
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
@ -40,10 +43,10 @@ import (
) )
const ( const (
reconcilerLoopPeriod time.Duration = 10 * time.Millisecond reconcilerLoopPeriod = 10 * time.Millisecond
syncLoopPeriod time.Duration = 100 * time.Minute syncLoopPeriod = 100 * time.Minute
maxWaitForUnmountDuration time.Duration = 50 * time.Millisecond maxWaitForUnmountDuration = 50 * time.Millisecond
maxLongWaitForUnmountDuration time.Duration = 4200 * time.Second maxLongWaitForUnmountDuration = 4200 * time.Second
) )
// Calls Run() // Calls Run()
@ -70,9 +73,10 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder) reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder)
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin)
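The Run-based tests in this file all share the same setup. A sketch of that setup as a reusable helper, under the assumption that the component exposes Run(ctx); startForTest and ctxRunner are illustrative names, not from the source:

package sketch

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
)

// ctxRunner is a hypothetical interface matching the reconciler's new Run
// signature.
type ctxRunner interface {
	Run(ctx context.Context)
}

// startForTest shows the test pattern that replaces the stop channel: build a
// cancelable context on top of ktesting's context and cancel it when the test
// ends, instead of closing a channel.
func startForTest(t *testing.T, r ctxRunner) {
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	t.Cleanup(cancel)
	go r.Run(ctx)
}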
@ -122,9 +126,10 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -175,9 +180,10 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
} }
// Act // Act
ch := make(chan struct{}) logger, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -196,8 +202,8 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
generatedVolumeName, generatedVolumeName,
nodeName) nodeName)
} }
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert // Assert
waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -250,9 +256,10 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -324,9 +331,10 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
} }
// Act // Act
ch := make(chan struct{}) logger, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -345,8 +353,8 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
generatedVolumeName, generatedVolumeName,
nodeName) nodeName)
} }
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert // Assert
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin) verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
@ -403,9 +411,10 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 2 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
@ -497,9 +506,10 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteOnce(t *testing.
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -579,9 +589,10 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing
dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/) dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
// Act // Act
ch := make(chan struct{}) logger, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Add the pod in which the volume is attached to the uncertain node // Add the pod in which the volume is attached to the uncertain node
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1) generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@ -593,11 +604,11 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing
// Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node. // Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse. // When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse.
// Without this, the delete operation will be delayed due to mounted status // Without this, the delete operation will be delayed due to mounted status
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName1, false /* mounted */) asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName1, false /* mounted */)
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
@ -629,9 +640,12 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
nodeLister := informerFactory.Core().V1().Nodes().Lister() nodeLister := informerFactory.Core().V1().Nodes().Lister()
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */) nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
rc := NewReconciler( rc := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder) reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder)
reconciliationLoopFunc := rc.(*reconciler).reconciliationLoopFunc() reconciliationLoopFunc := rc.(*reconciler).reconciliationLoopFunc(ctx)
podName1 := "pod-uid1" podName1 := "pod-uid1"
volumeName := v1.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@ -646,22 +660,22 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
} }
// Act // Act
reconciliationLoopFunc() reconciliationLoopFunc(ctx)
// Volume is added to asw, volume should be reported as attached to the node. // Volume is added to asw, volume should be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// Delete the pod // Delete the pod
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
// Mock NodeStatusUpdate fail // Mock NodeStatusUpdate fail
rc.(*reconciler).nodeStatusUpdater = statusupdater.NewFakeNodeStatusUpdater(true /* returnError */) rc.(*reconciler).nodeStatusUpdater = statusupdater.NewFakeNodeStatusUpdater(true /* returnError */)
reconciliationLoopFunc() reconciliationLoopFunc(ctx)
// The first detach will be triggered after at least 50ms (maxWaitForUnmountDuration in test). // The first detach will be triggered after at least 50ms (maxWaitForUnmountDuration in test).
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
reconciliationLoopFunc() reconciliationLoopFunc(ctx)
// Right before detach operation is performed, the volume will be first removed from being reported // Right before detach operation is performed, the volume will be first removed from being reported
// as attached on node status (RemoveVolumeFromReportAsAttached). After UpdateNodeStatus operation which is expected to fail, // as attached on node status (RemoveVolumeFromReportAsAttached). After UpdateNodeStatus operation which is expected to fail,
// controller then added the volume back as attached. // controller then added the volume back as attached.
@ -669,7 +683,7 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
// in node status. By calling this function (GetVolumesToReportAttached), node status should be updated, and the volume // in node status. By calling this function (GetVolumesToReportAttached), node status should be updated, and the volume
// will not need to be updated until new changes are applied (detach is triggered again) // will not need to be updated until new changes are applied (detach is triggered again)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
} }
@ -703,9 +717,10 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/) dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
// Act // Act
ch := make(chan struct{}) logger, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Add the pod in which the volume is attached to the FailDetachNode // Add the pod in which the volume is attached to the FailDetachNode
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1) generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@ -717,7 +732,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// Volume is added to asw, volume should be reported as attached to the node. // Volume is added to asw, volume should be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// Delete the pod, but detach will fail // Delete the pod, but detach will fail
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
@ -732,7 +747,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// will not need to be updated until new changes are applied (detach is triggered again) // will not need to be updated until new changes are applied (detach is triggered again)
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// After the first detach fails, reconciler will wait for a period of time before retrying to detach. // After the first detach fails, reconciler will wait for a period of time before retrying to detach.
// The wait time increases exponentially from an initial value of 0.5s (0.5, 1, 2, 4, ...). // The wait time increases exponentially from an initial value of 0.5s (0.5, 1, 2, 4, ...).
@ -740,14 +755,14 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// the first detach operation. At this point, volumes status should not be updated // the first detach operation. At this point, volumes status should not be updated
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw) verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
// Wait for 600ms to make sure second detach operation triggered. Again, The volume will be // Wait for 600ms to make sure second detach operation triggered. Again, The volume will be
// removed from being reported as attached on node status and then added back as attached. // removed from being reported as attached on node status and then added back as attached.
// The volume will be in the list of attached volumes that need to be updated to node status. // The volume will be in the list of attached volumes that need to be updated to node status.
time.Sleep(600 * time.Millisecond) time.Sleep(600 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// Add a second pod which tries to attach the volume to the same node. // Add a second pod which tries to attach the volume to the same node.
// After adding pod to the same node, detach will not be triggered any more. // After adding pod to the same node, detach will not be triggered any more.
@ -758,7 +773,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// Sleep 1s to verify no detach are triggered after second pod is added in the future. // Sleep 1s to verify no detach are triggered after second pod is added in the future.
time.Sleep(1000 * time.Millisecond) time.Sleep(1000 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw) verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
// Add a third pod which tries to attach the volume to a different node. // Add a third pod which tries to attach the volume to a different node.
// At this point, volume is still attached to first node. There are no status update for both nodes. // At this point, volume is still attached to first node. There are no status update for both nodes.
@ -767,8 +782,8 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr) t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
} }
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw) verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName2, asw) verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName2, asw)
} }
// Creates a volume with accessMode ReadWriteOnce // Creates a volume with accessMode ReadWriteOnce
@ -805,9 +820,10 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T
dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/) dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
// Act // Act
ch := make(chan struct{}) logger, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Add the pod in which the volume is attached to the timeout node // Add the pod in which the volume is attached to the timeout node
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1) generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@ -818,11 +834,11 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T
// Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node. // Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateUncertain, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateUncertain, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, false, asw) verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, false, asw)
// When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse. // When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse.
// Without this, the delete operation will be delayed due to mounted status // Without this, the delete operation will be delayed due to mounted status
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName1, false /* mounted */) asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName1, false /* mounted */)
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
@ -895,9 +911,10 @@ func Test_Run_OneVolumeDetachOnOutOfServiceTaintedNode(t *testing.T) {
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -973,9 +990,10 @@ func Test_Run_OneVolumeDetachOnNoOutOfServiceTaintedNode(t *testing.T) {
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -1057,9 +1075,10 @@ func Test_Run_OneVolumeDetachOnUnhealthyNode(t *testing.T) {
} }
// Act // Act
ch := make(chan struct{}) _, ctx := ktesting.NewTestContext(t)
go reconciler.Run(ch) ctx, cancel := context.WithCancel(ctx)
defer close(ch) defer cancel()
go reconciler.Run(ctx)
// Assert // Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -1176,10 +1195,11 @@ func Test_ReportMultiAttachError(t *testing.T) {
} }
} }
// Act // Act
logger, _ := ktesting.NewTestContext(t)
volumes := dsw.GetVolumesToAttach() volumes := dsw.GetVolumesToAttach()
for _, vol := range volumes { for _, vol := range volumes {
if vol.NodeName == "node1" { if vol.NodeName == "node1" {
rc.(*reconciler).reportMultiAttachError(vol, nodes) rc.(*reconciler).reportMultiAttachError(logger, vol, nodes)
} }
} }
@ -1587,13 +1607,14 @@ func verifyVolumeAttachedToNode(
func verifyVolumeReportedAsAttachedToNode( func verifyVolumeReportedAsAttachedToNode(
t *testing.T, t *testing.T,
logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName, nodeName k8stypes.NodeName,
isAttached bool, isAttached bool,
asw cache.ActualStateOfWorld, asw cache.ActualStateOfWorld,
) { ) {
result := false result := false
volumes := asw.GetVolumesToReportAttached() volumes := asw.GetVolumesToReportAttached(logger)
for _, volume := range volumes[nodeName] { for _, volume := range volumes[nodeName] {
if volume.Name == volumeName { if volume.Name == volumeName {
result = true result = true
@ -1614,11 +1635,12 @@ func verifyVolumeReportedAsAttachedToNode(
func verifyVolumeNoStatusUpdateNeeded( func verifyVolumeNoStatusUpdateNeeded(
t *testing.T, t *testing.T,
logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName, nodeName k8stypes.NodeName,
asw cache.ActualStateOfWorld, asw cache.ActualStateOfWorld,
) { ) {
volumes := asw.GetVolumesToReportAttached() volumes := asw.GetVolumesToReportAttached(logger)
for _, volume := range volumes[nodeName] { for _, volume := range volumes[nodeName] {
if volume.Name == volumeName { if volume.Name == volumeName {
t.Fatalf("Check volume <%v> is reported as need to update status on node <%v>, expected false", t.Fatalf("Check volume <%v> is reported as need to update status on node <%v>, expected false",
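A minimal, self-contained sketch of the stop-channel-to-context switch shown in the tests above. Everything named here (package demo, runLoop, TestRunWithTestContext) is an illustrative stand-in rather than the controller's real code; only the klog/ktesting calls are the actual APIs.

package demo

import (
	"context"
	"testing"
	"time"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// runLoop is a hypothetical stand-in for reconciler.Run(ctx): it logs through
// the contextual logger and exits when the test cancels the context.
func runLoop(ctx context.Context) {
	logger := klog.FromContext(ctx)
	for {
		select {
		case <-ctx.Done():
			logger.V(4).Info("Run loop stopping")
			return
		case <-time.After(10 * time.Millisecond):
			logger.V(4).Info("Reconciling")
		}
	}
}

func TestRunWithTestContext(t *testing.T) {
	// ktesting routes klog output to the test log and returns a context
	// that already carries the per-test logger.
	logger, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // replaces the old "defer close(ch)"

	go runLoop(ctx)

	// Helpers that need to log can take the logger explicitly, mirroring
	// asw.SetVolumeMountedByNode(logger, ...) in the diff above.
	logger.Info("Test started")
	time.Sleep(50 * time.Millisecond)
}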

View File

@ -18,6 +18,8 @@ package statusupdater
import ( import (
"fmt" "fmt"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
) )
@ -31,7 +33,7 @@ type fakeNodeStatusUpdater struct {
returnError bool returnError bool
} }
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error { func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses(logger klog.Logger) error {
if fnsu.returnError { if fnsu.returnError {
return fmt.Errorf("fake error on update node status") return fmt.Errorf("fake error on update node status")
} }
@ -39,7 +41,7 @@ func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error {
return nil return nil
} }
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatusForNode(nodeName types.NodeName) error { func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error {
if fnsu.returnError { if fnsu.returnError {
return fmt.Errorf("fake error on update node status") return fmt.Errorf("fake error on update node status")
} }
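The fake above only changes method signatures; the point is that the logger travels through the NodeStatusUpdater interface so the real and fake implementations stay interchangeable. A trimmed-down sketch under that assumption, with hypothetical names (nodeStatusUpdater, fakeUpdater), using only klog and the apimachinery types package:

package demo

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

type nodeStatusUpdater interface {
	UpdateNodeStatuses(logger klog.Logger) error
	UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error
}

type fakeUpdater struct {
	returnError bool
}

func (f *fakeUpdater) UpdateNodeStatuses(logger klog.Logger) error {
	if f.returnError {
		return fmt.Errorf("fake error on update node status")
	}
	return nil
}

func (f *fakeUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error {
	if f.returnError {
		return fmt.Errorf("fake error on update node status")
	}
	// A fake may still use the logger; here it just records which node was touched.
	logger.V(4).Info("Fake node status update", "node", klog.KRef("", string(nodeName)))
	return nil
}

// Compile-time check that the fake satisfies the interface.
var _ nodeStatusUpdater = &fakeUpdater{}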

View File

@ -20,14 +20,13 @@ package statusupdater
import ( import (
"fmt" "fmt"
"k8s.io/klog/v2"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
nodeutil "k8s.io/component-helpers/node/util" nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
) )
@ -36,9 +35,9 @@ import (
type NodeStatusUpdater interface { type NodeStatusUpdater interface {
// Gets a list of node statuses that should be updated from the actual state // Gets a list of node statuses that should be updated from the actual state
// of the world and updates them. // of the world and updates them.
UpdateNodeStatuses() error UpdateNodeStatuses(logger klog.Logger) error
// Update any pending status change for the given node // Update any pending status change for the given node
UpdateNodeStatusForNode(nodeName types.NodeName) error UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error
} }
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater. // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
@ -59,13 +58,13 @@ type nodeStatusUpdater struct {
actualStateOfWorld cache.ActualStateOfWorld actualStateOfWorld cache.ActualStateOfWorld
} }
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { func (nsu *nodeStatusUpdater) UpdateNodeStatuses(logger klog.Logger) error {
var nodeIssues int var nodeIssues int
// TODO: investigate right behavior if nodeName is empty // TODO: investigate right behavior if nodeName is empty
// kubernetes/kubernetes/issues/37777 // kubernetes/kubernetes/issues/37777
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached() nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached(logger)
for nodeName, attachedVolumes := range nodesToUpdate { for nodeName, attachedVolumes := range nodesToUpdate {
err := nsu.processNodeVolumes(nodeName, attachedVolumes) err := nsu.processNodeVolumes(logger, nodeName, attachedVolumes)
if err != nil { if err != nil {
nodeIssues += 1 nodeIssues += 1
} }
@ -76,56 +75,50 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
return nil return nil
} }
func (nsu *nodeStatusUpdater) UpdateNodeStatusForNode(nodeName types.NodeName) error { func (nsu *nodeStatusUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error {
needsUpdate, attachedVolumes := nsu.actualStateOfWorld.GetVolumesToReportAttachedForNode(nodeName) needsUpdate, attachedVolumes := nsu.actualStateOfWorld.GetVolumesToReportAttachedForNode(logger, nodeName)
if !needsUpdate { if !needsUpdate {
return nil return nil
} }
return nsu.processNodeVolumes(nodeName, attachedVolumes) return nsu.processNodeVolumes(logger, nodeName, attachedVolumes)
} }
func (nsu *nodeStatusUpdater) processNodeVolumes(nodeName types.NodeName, attachedVolumes []v1.AttachedVolume) error { func (nsu *nodeStatusUpdater) processNodeVolumes(logger klog.Logger, nodeName types.NodeName, attachedVolumes []v1.AttachedVolume) error {
nodeObj, err := nsu.nodeLister.Get(string(nodeName)) nodeObj, err := nsu.nodeLister.Get(string(nodeName))
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
// If node does not exist, its status cannot be updated. // If node does not exist, its status cannot be updated.
// Do nothing so that there is no retry until node is created. // Do nothing so that there is no retry until node is created.
klog.V(2).Infof( logger.V(2).Info(
"Could not update node status. Failed to find node %q in NodeInformer cache. Error: '%v'", "Could not update node status. Failed to find node in NodeInformer cache", "node", klog.KRef("", string(nodeName)), "err", err)
nodeName,
err)
return nil return nil
} else if err != nil { } else if err != nil {
// For all other errors, log error and reset flag statusUpdateNeeded // For all other errors, log error and reset flag statusUpdateNeeded
// back to true to indicate this node status needs to be updated again. // back to true to indicate this node status needs to be updated again.
klog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err) logger.V(2).Info("Error retrieving nodes from node lister", "err", err)
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName)
return err return err
} }
err = nsu.updateNodeStatus(nodeName, nodeObj, attachedVolumes) err = nsu.updateNodeStatus(logger, nodeName, nodeObj, attachedVolumes)
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
// If node does not exist, its status cannot be updated. // If node does not exist, its status cannot be updated.
// Do nothing so that there is no retry until node is created. // Do nothing so that there is no retry until node is created.
klog.V(2).Infof( logger.V(2).Info(
"Could not update node status for %q; node does not exist - skipping", "Could not update node status, node does not exist - skipping", "node", klog.KObj(nodeObj))
nodeName)
return nil return nil
} else if err != nil { } else if err != nil {
// If update node status fails, reset flag statusUpdateNeeded back to true // If update node status fails, reset flag statusUpdateNeeded back to true
// to indicate this node status needs to be updated again // to indicate this node status needs to be updated again
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName)
klog.V(2).Infof( logger.V(2).Info("Could not update node status; re-marking for update", "node", klog.KObj(nodeObj), "err", err)
"Could not update node status for %q; re-marking for update. %v",
nodeName,
err)
return err return err
} }
return nil return nil
} }
func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error { func (nsu *nodeStatusUpdater) updateNodeStatus(logger klog.Logger, nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error {
node := nodeObj.DeepCopy() node := nodeObj.DeepCopy()
node.Status.VolumesAttached = attachedVolumes node.Status.VolumesAttached = attachedVolumes
_, patchBytes, err := nodeutil.PatchNodeStatus(nsu.kubeClient.CoreV1(), nodeName, nodeObj, node) _, patchBytes, err := nodeutil.PatchNodeStatus(nsu.kubeClient.CoreV1(), nodeName, nodeObj, node)
@ -133,6 +126,6 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj
return err return err
} }
klog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes) logger.V(4).Info("Updating status for node succeeded", "node", klog.KObj(node), "patchBytes", patchBytes, "attachedVolumes", attachedVolumes)
return nil return nil
} }
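The conversions above swap printf-style klog calls for a constant message plus key/value pairs, with klog.KObj and klog.KRef formatting object references. A standalone sketch of the same calls, assuming nothing beyond klog and the core Node type; the node name and error are made up:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background()
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "testnode-1"}}
	err := fmt.Errorf("node %q not found in NodeInformer cache", node.Name)

	// Old style: values baked into a format string, e.g.
	//   klog.V(2).Infof("Could not update node status for %q; re-marking for update. %v", nodeName, err)

	// New style: constant message plus key/value pairs. klog.KObj renders
	// namespace/name (just the name for cluster-scoped objects such as Node).
	logger.V(2).Info("Could not update node status; re-marking for update", "node", klog.KObj(node), "err", err)

	// klog.KRef is the equivalent when only the name (and namespace) strings are at hand.
	logger.V(2).Info("Could not update node status. Failed to find node in NodeInformer cache", "node", klog.KRef("", "testnode-1"), "err", err)
}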

View File

@ -20,6 +20,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
@ -27,6 +28,7 @@ import (
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@ -37,7 +39,7 @@ import (
// setupNodeStatusUpdate creates all the needed objects for testing. // setupNodeStatusUpdate creates all the needed objects for testing.
// the initial environment has 2 nodes with no volumes attached // the initial environment has 2 nodes with no volumes attached
// and adds one volume to attach to each node to the actual state of the world // and adds one volume to attach to each node to the actual state of the world
func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualStateOfWorld, *fake.Clientset, NodeStatusUpdater) { func setupNodeStatusUpdate(logger klog.Logger, t *testing.T) (cache.ActualStateOfWorld, *fake.Clientset, NodeStatusUpdater) {
testNode1 := corev1.Node{ testNode1 := corev1.Node{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Node", Kind: "Node",
@ -83,11 +85,11 @@ func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualState
nodeName2 := types.NodeName("testnode-2") nodeName2 := types.NodeName("testnode-2")
devicePath := "fake/device/path" devicePath := "fake/device/path"
_, err = asw.AddVolumeNode(volumeName1, volumeSpec1, nodeName1, devicePath, true) _, err = asw.AddVolumeNode(logger, volumeName1, volumeSpec1, nodeName1, devicePath, true)
if err != nil { if err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
} }
_, err = asw.AddVolumeNode(volumeName2, volumeSpec2, nodeName2, devicePath, true) _, err = asw.AddVolumeNode(logger, volumeName2, volumeSpec2, nodeName2, devicePath, true)
if err != nil { if err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
} }
@ -101,14 +103,15 @@ func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualState
// checks that each node status.volumesAttached is of length 1 and contains the correct volume // checks that each node status.volumesAttached is of length 1 and contains the correct volume
func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) { func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) {
ctx := context.Background() ctx := context.Background()
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t) logger := klog.FromContext(ctx)
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t)
err := nsu.UpdateNodeStatuses() err := nsu.UpdateNodeStatuses(logger)
if err != nil { if err != nil {
t.Fatalf("UpdateNodeStatuses failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("UpdateNodeStatuses failed. Expected: <no error> Actual: <%v>", err)
} }
needToReport := asw.GetVolumesToReportAttached() needToReport := asw.GetVolumesToReportAttached(logger)
if len(needToReport) != 0 { if len(needToReport) != 0 {
t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <0> Actual: <%v>", len(needToReport)) t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <0> Actual: <%v>", len(needToReport))
} }
@ -138,7 +141,8 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) {
func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T) { func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T) {
ctx := context.Background() ctx := context.Background()
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t) logger := klog.FromContext(ctx)
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t)
var failedNode string var failedNode string
failedOnce := false failedOnce := false
@ -153,12 +157,12 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T)
return false, nil, nil return false, nil, nil
}) })
err := nsu.UpdateNodeStatuses() err := nsu.UpdateNodeStatuses(logger)
if errors.Is(err, failureErr) { if errors.Is(err, failureErr) {
t.Fatalf("UpdateNodeStatuses failed. Expected: <test generated error> Actual: <%v>", err) t.Fatalf("UpdateNodeStatuses failed. Expected: <test generated error> Actual: <%v>", err)
} }
needToReport := asw.GetVolumesToReportAttached() needToReport := asw.GetVolumesToReportAttached(logger)
if len(needToReport) != 1 { if len(needToReport) != 1 {
t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport)) t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport))
} }
@ -194,14 +198,15 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T)
// checks that testnode-1 status.volumesAttached is of length 1 and contains the correct volume // checks that testnode-1 status.volumesAttached is of length 1 and contains the correct volume
func TestNodeStatusUpdater_UpdateNodeStatusForNode(t *testing.T) { func TestNodeStatusUpdater_UpdateNodeStatusForNode(t *testing.T) {
ctx := context.Background() ctx := context.Background()
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t) logger := klog.FromContext(ctx)
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t)
err := nsu.UpdateNodeStatusForNode("testnode-1") err := nsu.UpdateNodeStatusForNode(logger, "testnode-1")
if err != nil { if err != nil {
t.Fatalf("UpdateNodeStatuses failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("UpdateNodeStatuses failed. Expected: <no error> Actual: <%v>", err)
} }
needToReport := asw.GetVolumesToReportAttached() needToReport := asw.GetVolumesToReportAttached(logger)
if len(needToReport) != 1 { if len(needToReport) != 1 {
t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport)) t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport))
} }
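These tests derive a logger from context.Background(), which makes klog.FromContext fall back to the global klog logger, and then hand that logger to every call explicitly. A small sketch of both ways a test can obtain a logger; updateStatuses is a hypothetical stand-in for nsu.UpdateNodeStatuses:

package demo

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// updateStatuses stands in for any helper that now takes the logger explicitly.
func updateStatuses(logger klog.Logger) error {
	logger.V(4).Info("Updating node statuses")
	return nil
}

func TestLoggerFromContext(t *testing.T) {
	// Variant used above: klog.FromContext on a bare context returns the
	// global fallback logger, so the usual klog flags still apply.
	logger := klog.FromContext(context.Background())
	if err := updateStatuses(logger); err != nil {
		t.Fatal(err)
	}

	// Alternative: a per-test logger whose output ends up in the test log.
	testLogger, _ := ktesting.NewTestContext(t)
	if err := updateStatuses(testLogger); err != nil {
		t.Fatal(err)
	}
}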

View File

@ -376,7 +376,7 @@ func (plugin *TestPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
plugin.pluginLock.Lock() plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock() defer plugin.pluginLock.Unlock()
if spec == nil { if spec == nil {
klog.Errorf("GetVolumeName called with nil volume spec") klog.ErrorS(nil, "GetVolumeName called with nil volume spec")
plugin.ErrorEncountered = true plugin.ErrorEncountered = true
return "", fmt.Errorf("GetVolumeName called with nil volume spec") return "", fmt.Errorf("GetVolumeName called with nil volume spec")
} }
@ -400,7 +400,7 @@ func (plugin *TestPlugin) CanSupport(spec *volume.Spec) bool {
plugin.pluginLock.Lock() plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock() defer plugin.pluginLock.Unlock()
if spec == nil { if spec == nil {
klog.Errorf("CanSupport called with nil volume spec") klog.ErrorS(nil, "CanSupport called with nil volume spec")
plugin.ErrorEncountered = true plugin.ErrorEncountered = true
} }
return true return true
@ -414,7 +414,7 @@ func (plugin *TestPlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts vol
plugin.pluginLock.Lock() plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock() defer plugin.pluginLock.Unlock()
if spec == nil { if spec == nil {
klog.Errorf("NewMounter called with nil volume spec") klog.ErrorS(nil, "NewMounter called with nil volume spec")
plugin.ErrorEncountered = true plugin.ErrorEncountered = true
} }
return nil, nil return nil, nil
@ -540,7 +540,7 @@ func (attacher *testPluginAttacher) Attach(spec *volume.Spec, nodeName types.Nod
defer attacher.pluginLock.Unlock() defer attacher.pluginLock.Unlock()
if spec == nil { if spec == nil {
*attacher.ErrorEncountered = true *attacher.ErrorEncountered = true
klog.Errorf("Attach called with nil volume spec") klog.ErrorS(nil, "Attach called with nil volume spec")
return "", fmt.Errorf("Attach called with nil volume spec") return "", fmt.Errorf("Attach called with nil volume spec")
} }
attacher.attachedVolumeMap[string(nodeName)] = append(attacher.attachedVolumeMap[string(nodeName)], spec.Name()) attacher.attachedVolumeMap[string(nodeName)] = append(attacher.attachedVolumeMap[string(nodeName)], spec.Name())
@ -556,7 +556,7 @@ func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath
defer attacher.pluginLock.Unlock() defer attacher.pluginLock.Unlock()
if spec == nil { if spec == nil {
*attacher.ErrorEncountered = true *attacher.ErrorEncountered = true
klog.Errorf("WaitForAttach called with nil volume spec") klog.ErrorS(nil, "WaitForAttach called with nil volume spec")
return "", fmt.Errorf("WaitForAttach called with nil volume spec") return "", fmt.Errorf("WaitForAttach called with nil volume spec")
} }
fakePath := fmt.Sprintf("%s/%s", devicePath, spec.Name()) fakePath := fmt.Sprintf("%s/%s", devicePath, spec.Name())
@ -568,7 +568,7 @@ func (attacher *testPluginAttacher) GetDeviceMountPath(spec *volume.Spec) (strin
defer attacher.pluginLock.Unlock() defer attacher.pluginLock.Unlock()
if spec == nil { if spec == nil {
*attacher.ErrorEncountered = true *attacher.ErrorEncountered = true
klog.Errorf("GetDeviceMountPath called with nil volume spec") klog.ErrorS(nil, "GetDeviceMountPath called with nil volume spec")
return "", fmt.Errorf("GetDeviceMountPath called with nil volume spec") return "", fmt.Errorf("GetDeviceMountPath called with nil volume spec")
} }
return "", nil return "", nil
@ -579,7 +579,7 @@ func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath st
defer attacher.pluginLock.Unlock() defer attacher.pluginLock.Unlock()
if spec == nil { if spec == nil {
*attacher.ErrorEncountered = true *attacher.ErrorEncountered = true
klog.Errorf("MountDevice called with nil volume spec") klog.ErrorS(nil, "MountDevice called with nil volume spec")
return fmt.Errorf("MountDevice called with nil volume spec") return fmt.Errorf("MountDevice called with nil volume spec")
} }
return nil return nil
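The test plugin above replaces klog.Errorf with klog.ErrorS(nil, ...): same error severity, structured form, and nil where there is no error value to attach. A minimal sketch:

package main

import "k8s.io/klog/v2"

func main() {
	defer klog.Flush()

	// Old: klog.Errorf("GetVolumeName called with nil volume spec")
	// New: error severity, structured message, nil because no error object exists yet.
	klog.ErrorS(nil, "GetVolumeName called with nil volume spec")

	// When an error value does exist, it goes in the first argument, e.g.
	//   klog.ErrorS(err, "GetVolumeName failed", "plugin", pluginName)
}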

View File

@ -38,7 +38,7 @@ import (
// A volume.Spec that refers to an in-tree plugin spec is translated to refer // A volume.Spec that refers to an in-tree plugin spec is translated to refer
// to a migrated CSI plugin spec if all conditions for CSI migration on a node // to a migrated CSI plugin spec if all conditions for CSI migration on a node
// for the in-tree plugin are satisfied. // for the in-tree plugin are satisfied.
func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) { func CreateVolumeSpec(logger klog.Logger, podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) {
claimName := "" claimName := ""
readOnly := false readOnly := false
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
@ -50,10 +50,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
claimName = ephemeral.VolumeClaimName(pod, &podVolume) claimName = ephemeral.VolumeClaimName(pod, &podVolume)
} }
if claimName != "" { if claimName != "" {
klog.V(10).Infof( logger.V(10).Info("Found PVC", "PVC", klog.KRef(pod.Namespace, claimName))
"Found PVC, ClaimName: %q/%q",
pod.Namespace,
claimName)
// If podVolume is a PVC, fetch the real PV behind the claim // If podVolume is a PVC, fetch the real PV behind the claim
pvc, err := getPVCFromCache(pod.Namespace, claimName, pvcLister) pvc, err := getPVCFromCache(pod.Namespace, claimName, pvcLister)
@ -71,12 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
} }
pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
klog.V(10).Infof( logger.V(10).Info("Found bound PV for PVC", "PVC", klog.KRef(pod.Namespace, claimName), "pvcUID", pvcUID, "PV", klog.KRef("", pvName))
"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
pod.Namespace,
claimName,
pvcUID,
pvName)
// Fetch actual PV object // Fetch actual PV object
volumeSpec, err := getPVSpecFromCache( volumeSpec, err := getPVSpecFromCache(
@ -98,13 +90,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
err) err)
} }
klog.V(10).Infof( logger.V(10).Info("Extracted volumeSpec from bound PV and PVC", "PVC", klog.KRef(pod.Namespace, claimName), "pvcUID", pvcUID, "PV", klog.KRef("", pvName), "volumeSpecName", volumeSpec.Name())
"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
volumeSpec.Name(),
pvName,
pod.Namespace,
claimName,
pvcUID)
return volumeSpec, nil return volumeSpec, nil
} }
@ -199,59 +185,39 @@ func DetermineVolumeAction(pod *v1.Pod, desiredStateOfWorld cache.DesiredStateOf
// ProcessPodVolumes processes the volumes in the given pod and adds them to the // ProcessPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world if addVolumes is true, otherwise it removes them. // desired state of the world if addVolumes is true, otherwise it removes them.
func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.DesiredStateOfWorld, volumePluginMgr *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) { func ProcessPodVolumes(logger klog.Logger, pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.DesiredStateOfWorld, volumePluginMgr *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) {
if pod == nil { if pod == nil {
return return
} }
if len(pod.Spec.Volumes) <= 0 { if len(pod.Spec.Volumes) <= 0 {
klog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.", logger.V(10).Info("Skipping processing of pod, it has no volumes", "pod", klog.KObj(pod))
pod.Namespace,
pod.Name)
return return
} }
nodeName := types.NodeName(pod.Spec.NodeName) nodeName := types.NodeName(pod.Spec.NodeName)
if nodeName == "" { if nodeName == "" {
klog.V(10).Infof( logger.V(10).Info("Skipping processing of pod, it is not scheduled to a node", "pod", klog.KObj(pod))
"Skipping processing of pod %q/%q: it is not scheduled to a node.",
pod.Namespace,
pod.Name)
return return
} else if !desiredStateOfWorld.NodeExists(nodeName) { } else if !desiredStateOfWorld.NodeExists(nodeName) {
// If the node the pod is scheduled to does not exist in the desired // If the node the pod is scheduled to does not exist in the desired
// state of the world data structure, that indicates the node is not // state of the world data structure, that indicates the node is not
// yet managed by the controller. Therefore, ignore the pod. // yet managed by the controller. Therefore, ignore the pod.
klog.V(4).Infof( logger.V(4).Info("Skipping processing of pod, it is scheduled to node which is not managed by the controller", "node", klog.KRef("", string(nodeName)), "pod", klog.KObj(pod))
"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
pod.Namespace,
pod.Name,
nodeName)
return return
} }
// Process volume spec for each volume defined in pod // Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes { for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := CreateVolumeSpec(podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator) volumeSpec, err := CreateVolumeSpec(logger, podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator)
if err != nil { if err != nil {
klog.V(10).Infof( logger.V(10).Info("Error processing volume for pod", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
"Error processing volume %q for pod %q/%q: %v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
continue continue
} }
attachableVolumePlugin, err := attachableVolumePlugin, err :=
volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil { if err != nil || attachableVolumePlugin == nil {
klog.V(10).Infof( logger.V(10).Info("Skipping volume for pod, it does not implement attacher interface", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
continue continue
} }
@ -261,12 +227,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
_, err := desiredStateOfWorld.AddPod( _, err := desiredStateOfWorld.AddPod(
uniquePodName, pod, volumeSpec, nodeName) uniquePodName, pod, volumeSpec, nodeName)
if err != nil { if err != nil {
klog.V(10).Infof( logger.V(10).Info("Failed to add volume for pod to desiredStateOfWorld", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
} }
} else { } else {
@ -274,12 +235,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec( uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec) attachableVolumePlugin, volumeSpec)
if err != nil { if err != nil {
klog.V(10).Infof( logger.V(10).Info("Failed to delete volume for pod from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
continue continue
} }
desiredStateOfWorld.DeletePod( desiredStateOfWorld.DeletePod(
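The util.go changes collapse the multi-line V(10) Infof traces into single structured calls keyed on the pod and claim. A standalone sketch of those calls, with flag parsing included so the -v threshold actually controls whether they are emitted; the pod and claim names are made up:

package main

import (
	"flag"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	_ = flag.Set("v", "10") // raise verbosity so the V(10) traces below appear
	flag.Parse()
	defer klog.Flush()

	logger := klog.Background()
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
	claimName := "www-web-0"

	// "pod" renders as default/web-0; the claim uses KRef because only the
	// namespace/name strings are at hand, not a PVC object.
	logger.V(10).Info("Found PVC", "PVC", klog.KRef(pod.Namespace, claimName))
	logger.V(10).Info("Skipping processing of pod, it has no volumes", "pod", klog.KObj(pod))
}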

View File

@ -30,6 +30,7 @@ import (
kubetypes "k8s.io/apimachinery/pkg/types" kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers" "k8s.io/client-go/informers"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake" fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
"k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/fc" "k8s.io/kubernetes/pkg/volume/fc"
@ -241,8 +242,9 @@ func Test_CreateVolumeSpec(t *testing.T) {
}, },
} { } {
t.Run(test.desc, func(t *testing.T) { t.Run(test.desc, func(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
plugMgr, intreeToCSITranslator, csiTranslator, pvLister, pvcLister := setup(testNodeName, t) plugMgr, intreeToCSITranslator, csiTranslator, pvLister, pvcLister := setup(testNodeName, t)
actualSpec, err := CreateVolumeSpec(test.pod.Spec.Volumes[0], test.pod, test.createNodeName, plugMgr, pvcLister, pvLister, intreeToCSITranslator, csiTranslator) actualSpec, err := CreateVolumeSpec(logger, test.pod.Spec.Volumes[0], test.pod, test.createNodeName, plugMgr, pvcLister, pvLister, intreeToCSITranslator, csiTranslator)
if actualSpec == nil && (test.wantPersistentVolume != nil || test.wantVolume != nil) { if actualSpec == nil && (test.wantPersistentVolume != nil || test.wantVolume != nil) {
t.Errorf("got volume spec is nil") t.Errorf("got volume spec is nil")

View File

@ -165,9 +165,9 @@ func (ec *ephemeralController) onPVCDelete(obj interface{}) {
func (ec *ephemeralController) Run(ctx context.Context, workers int) { func (ec *ephemeralController) Run(ctx context.Context, workers int) {
defer runtime.HandleCrash() defer runtime.HandleCrash()
defer ec.queue.ShutDown() defer ec.queue.ShutDown()
logger := klog.FromContext(ctx)
klog.Infof("Starting ephemeral volume controller") logger.Info("Starting ephemeral volume controller")
defer klog.Infof("Shutting down ephemeral volume controller") defer logger.Info("Shutting down ephemeral volume controller")
if !cache.WaitForNamedCacheSync("ephemeral", ctx.Done(), ec.podSynced, ec.pvcsSynced) { if !cache.WaitForNamedCacheSync("ephemeral", ctx.Done(), ec.podSynced, ec.pvcsSynced) {
return return
@ -212,18 +212,19 @@ func (ec *ephemeralController) syncHandler(ctx context.Context, key string) erro
return err return err
} }
pod, err := ec.podLister.Pods(namespace).Get(name) pod, err := ec.podLister.Pods(namespace).Get(name)
logger := klog.FromContext(ctx)
if err != nil { if err != nil {
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is gone", key) logger.V(5).Info("Ephemeral: nothing to do for pod, it is gone", "podKey", key)
return nil return nil
} }
klog.V(5).Infof("Error getting pod %s/%s (uid: %q) from informer : %v", pod.Namespace, pod.Name, pod.UID, err) logger.V(5).Info("Error getting pod from informer", "pod", klog.KObj(pod), "podUID", pod.UID, "err", err)
return err return err
} }
// Ignore pods which are already getting deleted. // Ignore pods which are already getting deleted.
if pod.DeletionTimestamp != nil { if pod.DeletionTimestamp != nil {
klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is marked for deletion", key) logger.V(5).Info("Ephemeral: nothing to do for pod, it is marked for deletion", "podKey", key)
return nil return nil
} }
@ -239,7 +240,8 @@ func (ec *ephemeralController) syncHandler(ctx context.Context, key string) erro
// handleEphemeralVolume is invoked for each volume of a pod. // handleEphemeralVolume is invoked for each volume of a pod.
func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vol v1.Volume) error { func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vol v1.Volume) error {
klog.V(5).Infof("ephemeral: checking volume %s", vol.Name) logger := klog.FromContext(ctx)
logger.V(5).Info("Ephemeral: checking volume", "volumeName", vol.Name)
if vol.Ephemeral == nil { if vol.Ephemeral == nil {
return nil return nil
} }
@ -254,7 +256,7 @@ func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vo
return err return err
} }
// Already created, nothing more to do. // Already created, nothing more to do.
klog.V(5).Infof("ephemeral: volume %s: PVC %s already created", vol.Name, pvcName) logger.V(5).Info("Ephemeral: PVC already created", "volumeName", vol.Name, "PVC", klog.KObj(pvc))
return nil return nil
} }
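Run and syncHandler above pull their logger out of the context with klog.FromContext instead of calling the klog package functions directly. A small sketch of that flow, assuming the caller has stored a (possibly named) logger in the context with klog.NewContext; run, syncHandler and the controller name are illustrative only:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func run(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting controller")
	defer logger.Info("Shutting down controller")

	syncHandler(ctx, "default/my-pod")
}

func syncHandler(ctx context.Context, key string) {
	// Each sync derives its logger from the context it was invoked with, so any
	// name or values attached by the caller travel along automatically.
	logger := klog.FromContext(ctx)
	logger.V(5).Info("Ephemeral: nothing to do for pod, it is gone", "podKey", key)
}

func main() {
	logger := klog.LoggerWithName(klog.Background(), "demo-controller")
	ctx := klog.NewContext(context.Background(), logger)
	run(ctx)
}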

View File

@ -219,20 +219,21 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
return nil return nil
} }
logger := klog.FromContext(ctx)
if err != nil { if err != nil {
klog.V(5).Infof("Error getting PVC %q from informer : %v", key, err) logger.V(5).Info("Error getting PVC from informer", "pvcKey", key, "err", err)
return err return err
} }
pv, err := expc.getPersistentVolume(ctx, pvc) pv, err := expc.getPersistentVolume(ctx, pvc)
if err != nil { if err != nil {
klog.V(5).Infof("Error getting Persistent Volume for PVC %q (uid: %q) from informer : %v", key, pvc.UID, err) logger.V(5).Info("Error getting Persistent Volume for PVC from informer", "pvcKey", key, "pvcUID", pvc.UID, "err", err)
return err return err
} }
if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.UID != pv.Spec.ClaimRef.UID { if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.UID != pv.Spec.ClaimRef.UID {
err := fmt.Errorf("persistent Volume is not bound to PVC being updated : %s", key) err := fmt.Errorf("persistent Volume is not bound to PVC being updated : %s", key)
klog.V(4).Infof("%v", err) logger.V(4).Info("", "err", err)
return err return err
} }
@ -249,14 +250,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false) volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec) migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec)
if err != nil { if err != nil {
klog.V(4).Infof("failed to check CSI migration status for PVC: %s with error: %v", key, err) logger.V(4).Info("Failed to check CSI migration status for PVC with error", "pvcKey", key, "err", err)
return nil return nil
} }
// handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree // handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree
if migratable { if migratable {
inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume) inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume)
if err != nil { if err != nil {
klog.V(4).Infof("Error getting in-tree plugin name from persistent volume %s: %v", volumeSpec.PersistentVolume.Name, err) logger.V(4).Info("Error getting in-tree plugin name from persistent volume", "volumeName", volumeSpec.PersistentVolume.Name, "err", err)
return err return err
} }
@ -286,46 +287,45 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
eventType = v1.EventTypeWarning eventType = v1.EventTypeWarning
} }
expc.recorder.Event(pvc, eventType, events.ExternalExpanding, msg) expc.recorder.Event(pvc, eventType, events.ExternalExpanding, msg)
klog.Infof("waiting for an external controller to expand the PVC %q (uid: %q)", key, pvc.UID) logger.Info("Waiting for an external controller to expand the PVC", "pvcKey", key, "pvcUID", pvc.UID)
// If we are expecting that an external plugin will handle resizing this volume then // If we are expecting that an external plugin will handle resizing this volume then
// there is no point in requeuing this PVC. // there is no point in requeuing this PVC.
return nil return nil
} }
volumeResizerName := volumePlugin.GetPluginName() volumeResizerName := volumePlugin.GetPluginName()
return expc.expand(pvc, pv, volumeResizerName) return expc.expand(logger, pvc, pv, volumeResizerName)
} }
func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, resizerName string) error { func (expc *expandController) expand(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, resizerName string) error {
// if node expand is complete and pv's annotation can be removed, remove the annotation from pv and return // if node expand is complete and pv's annotation can be removed, remove the annotation from pv and return
if expc.isNodeExpandComplete(pvc, pv) && metav1.HasAnnotation(pv.ObjectMeta, util.AnnPreResizeCapacity) { if expc.isNodeExpandComplete(logger, pvc, pv) && metav1.HasAnnotation(pv.ObjectMeta, util.AnnPreResizeCapacity) {
return util.DeleteAnnPreResizeCapacity(pv, expc.GetKubeClient()) return util.DeleteAnnPreResizeCapacity(pv, expc.GetKubeClient())
} }
var generatedOptions volumetypes.GeneratedOperations var generatedOptions volumetypes.GeneratedOperations
var err error var err error
if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) { if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
generatedOptions, err = expc.operationGenerator.GenerateExpandAndRecoverVolumeFunc(pvc, pv, resizerName) generatedOptions, err = expc.operationGenerator.GenerateExpandAndRecoverVolumeFunc(pvc, pv, resizerName)
if err != nil { if err != nil {
klog.Errorf("Error starting ExpandVolume for pvc %s with %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err) logger.Error(err, "Error starting ExpandVolume for pvc", "PVC", klog.KObj(pvc))
return err return err
} }
} else { } else {
pvc, err := util.MarkResizeInProgressWithResizer(pvc, resizerName, expc.kubeClient) pvc, err := util.MarkResizeInProgressWithResizer(pvc, resizerName, expc.kubeClient)
if err != nil { if err != nil {
klog.Errorf("Error setting PVC %s in progress with error : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err) logger.Error(err, "Error setting PVC in progress with error", "PVC", klog.KObj(pvc), "err", err)
return err return err
} }
generatedOptions, err = expc.operationGenerator.GenerateExpandVolumeFunc(pvc, pv) generatedOptions, err = expc.operationGenerator.GenerateExpandVolumeFunc(pvc, pv)
if err != nil { if err != nil {
klog.Errorf("Error starting ExpandVolume for pvc %s with %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err) logger.Error(err, "Error starting ExpandVolume for pvc with error", "PVC", klog.KObj(pvc), "err", err)
return err return err
} }
} }
klog.V(5).Infof("Starting ExpandVolume for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc)) logger.V(5).Info("Starting ExpandVolume for volume", "volumeName", util.GetPersistentVolumeClaimQualifiedName(pvc))
_, detailedErr := generatedOptions.Run() _, detailedErr := generatedOptions.Run()
return detailedErr return detailedErr
@ -335,9 +335,9 @@ func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.Persi
func (expc *expandController) Run(ctx context.Context) { func (expc *expandController) Run(ctx context.Context) {
defer runtime.HandleCrash() defer runtime.HandleCrash()
defer expc.queue.ShutDown() defer expc.queue.ShutDown()
logger := klog.FromContext(ctx)
klog.Infof("Starting expand controller") logger.Info("Starting expand controller")
defer klog.Infof("Shutting down expand controller") defer logger.Info("Shutting down expand controller")
if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced, expc.pvSynced) { if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced, expc.pvSynced) {
return return
@ -367,8 +367,8 @@ func (expc *expandController) getPersistentVolume(ctx context.Context, pvc *v1.P
} }
// isNodeExpandComplete returns true if pvc.Status.Capacity >= pv.Spec.Capacity // isNodeExpandComplete returns true if pvc.Status.Capacity >= pv.Spec.Capacity
func (expc *expandController) isNodeExpandComplete(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool { func (expc *expandController) isNodeExpandComplete(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool {
klog.V(4).Infof("pv %q capacity = %v, pvc %s capacity = %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage], pvc.ObjectMeta.Name, pvc.Status.Capacity[v1.ResourceStorage]) logger.V(4).Info("pv and pvc capacity", "PV", klog.KObj(pv), "pvCapacity", pv.Spec.Capacity[v1.ResourceStorage], "PVC", klog.KObj(pvc), "pvcCapacity", pvc.Status.Capacity[v1.ResourceStorage])
pvcSpecCap := pvc.Spec.Resources.Requests.Storage() pvcSpecCap := pvc.Spec.Resources.Requests.Storage()
pvcStatusCap, pvCap := pvc.Status.Capacity[v1.ResourceStorage], pv.Spec.Capacity[v1.ResourceStorage] pvcStatusCap, pvCap := pvc.Status.Capacity[v1.ResourceStorage], pv.Spec.Capacity[v1.ResourceStorage]
@ -469,7 +469,7 @@ func (expc *expandController) GetServiceAccountTokenFunc() func(_, _ string, _ *
func (expc *expandController) DeleteServiceAccountTokenFunc() func(types.UID) { func (expc *expandController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) { return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in expandController") klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in expandController")
} }
} }
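Two patterns recur in the expand controller changes above: logger.Error(err, ...) carries the error as its first argument, and helpers such as expand and isNodeExpandComplete now receive the logger as an explicit parameter. A trimmed sketch under those assumptions; markResizeInProgress and the PVC values are made up:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

// markResizeInProgress is a hypothetical helper that always fails, to exercise the error path.
func markResizeInProgress(pvc *v1.PersistentVolumeClaim) error {
	return fmt.Errorf("patch failed for %s/%s", pvc.Namespace, pvc.Name)
}

// expand receives the logger explicitly, mirroring expc.expand(logger, pvc, pv, resizerName).
func expand(logger klog.Logger, pvc *v1.PersistentVolumeClaim) error {
	if err := markResizeInProgress(pvc); err != nil {
		// Error severity: the error is the first argument, the rest are key/value pairs.
		logger.Error(err, "Error setting PVC in progress", "PVC", klog.KObj(pvc))
		return err
	}
	logger.V(5).Info("Starting ExpandVolume for volume", "PVC", klog.KObj(pvc))
	return nil
}

func main() {
	logger := klog.Background()
	pvc := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "data"}}
	_ = expand(logger, pvc)
}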

View File

@ -23,6 +23,7 @@ import (
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/component-helpers/storage/volume" "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2/ktesting"
) )
// Test single call to syncClaim and syncVolume methods. // Test single call to syncClaim and syncVolume methods.
@ -749,8 +750,8 @@ func TestSync(t *testing.T) {
test: testSyncClaim, test: testSyncClaim,
}, },
} }
_, ctx := ktesting.NewTestContext(t)
runSyncTests(t, tests, []*storage.StorageClass{ runSyncTests(t, ctx, tests, []*storage.StorageClass{
{ {
ObjectMeta: metav1.ObjectMeta{Name: classWait}, ObjectMeta: metav1.ObjectMeta{Name: classWait},
VolumeBindingMode: &modeWait, VolumeBindingMode: &modeWait,
@ -964,8 +965,8 @@ func TestSyncBlockVolume(t *testing.T) {
test: testSyncVolume, test: testSyncVolume,
}, },
} }
_, ctx := ktesting.NewTestContext(t)
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{}) runSyncTests(t, ctx, tests, []*storage.StorageClass{}, []*v1.Pod{})
} }
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -1016,6 +1017,6 @@ func TestMultiSync(t *testing.T) {
test: testSyncClaim, test: testSyncClaim,
}, },
} }
_, ctx := ktesting.NewTestContext(t)
runMultisyncTests(t, tests, []*storage.StorageClass{}, "") runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "")
} }

View File

@ -18,15 +18,16 @@ package persistentvolume
import ( import (
"errors" "errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"testing" "testing"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-helpers/storage/volume" "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2/ktesting"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
"k8s.io/kubernetes/pkg/features"
) )
// Test single call to syncVolume, expecting recycling to happen. // Test single call to syncVolume, expecting recycling to happen.
@ -37,6 +38,7 @@ func TestDeleteSync(t *testing.T) {
const gceDriver = "pd.csi.storage.gke.io" const gceDriver = "pd.csi.storage.gke.io"
// Default enable the HonorPVReclaimPolicy feature gate. // Default enable the HonorPVReclaimPolicy feature gate.
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)()
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{ tests := []controllerTest{
{ {
// delete volume bound by controller // delete volume bound by controller
@ -106,7 +108,7 @@ func TestDeleteSync(t *testing.T) {
expectedClaims: noclaims, expectedClaims: noclaims,
expectedEvents: noevents, expectedEvents: noevents,
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Delete the volume before delete operation starts // Delete the volume before delete operation starts
reactor.DeleteVolume("volume8-6") reactor.DeleteVolume("volume8-6")
}), }),
@ -122,7 +124,7 @@ func TestDeleteSync(t *testing.T) {
expectedClaims: newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil), expectedClaims: newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil),
expectedEvents: noevents, expectedEvents: noevents,
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Bind the volume to resurrected claim (this should never // Bind the volume to resurrected claim (this should never
// happen) // happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil) claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil)
@ -217,7 +219,7 @@ func TestDeleteSync(t *testing.T) {
test: wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), test: wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
}, },
} }
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{}) runSyncTests(t, ctx, tests, []*storage.StorageClass{}, []*v1.Pod{})
} }
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -250,6 +252,6 @@ func TestDeleteMultiSync(t *testing.T) {
test: wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), test: wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
}, },
} }
_, ctx := ktesting.NewTestContext(t)
runMultisyncTests(t, tests, []*storage.StorageClass{}, "") runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "")
} }
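In the delete tests above, a single ktesting context is created before the test table and later captured by the injected operation hook, which recovers the logger with klog.FromContext when it fires. A compact sketch of that capture with hypothetical names (testCase, wrapWithHook):

package demo

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

type testCase struct {
	name string
	hook func()
}

// wrapWithHook captures ctx now and pulls the logger back out when the hook runs later.
func wrapWithHook(ctx context.Context, name string) func() {
	return func() {
		klog.FromContext(ctx).V(4).Info("Reactor: scheduleOperation reached, injecting call", "test", name)
	}
}

func TestInjectedOperationSketch(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)

	tests := []testCase{
		{name: "delete volume bound by controller"},
		{name: "delete failure"},
	}
	for i := range tests {
		tests[i].hook = wrapWithHook(ctx, tests[i].name)
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			test.hook() // the injected hook logs through the shared per-test context
		})
	}
}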

View File

@ -117,9 +117,9 @@ type volumeReactor struct {
ctrl *PersistentVolumeController ctrl *PersistentVolumeController
} }
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []pvtesting.ReactorError) *volumeReactor { func newVolumeReactor(ctx context.Context, client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []pvtesting.ReactorError) *volumeReactor {
return &volumeReactor{ return &volumeReactor{
pvtesting.NewVolumeReactor(client, fakeVolumeWatch, fakeClaimWatch, errors), pvtesting.NewVolumeReactor(ctx, client, fakeVolumeWatch, fakeClaimWatch, errors),
ctrl, ctrl,
} }
} }
@ -170,14 +170,14 @@ func (r *volumeReactor) waitTest(test controllerTest) error {
// checkEvents compares all expectedEvents with events generated during the test // checkEvents compares all expectedEvents with events generated during the test
// and reports differences. // and reports differences.
func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error { func checkEvents(t *testing.T, ctx context.Context, expectedEvents []string, ctrl *PersistentVolumeController) error {
var err error var err error
// Read recorded events - wait up to 1 minute to get all the expected ones // Read recorded events - wait up to 1 minute to get all the expected ones
// (just in case some goroutines are slower with writing) // (just in case some goroutines are slower with writing)
timer := time.NewTimer(time.Minute) timer := time.NewTimer(time.Minute)
defer timer.Stop() defer timer.Stop()
logger := klog.FromContext(ctx)
fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder) fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)
gotEvents := []string{} gotEvents := []string{}
finished := false finished := false
@ -185,14 +185,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
select { select {
case event, ok := <-fakeRecorder.Events: case event, ok := <-fakeRecorder.Events:
if ok { if ok {
klog.V(5).Infof("event recorder got event %s", event) logger.V(5).Info("Event recorder got event", "event", event)
gotEvents = append(gotEvents, event) gotEvents = append(gotEvents, event)
} else { } else {
klog.V(5).Infof("event recorder finished") logger.V(5).Info("Event recorder finished")
finished = true finished = true
} }
case _, _ = <-timer.C: case _, _ = <-timer.C:
klog.V(5).Infof("event recorder timeout") logger.V(5).Info("Event recorder timeout")
finished = true finished = true
} }
} }
@ -219,7 +219,7 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
func alwaysReady() bool { return true } func alwaysReady() bool { return true }
func newTestController(kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) (*PersistentVolumeController, error) { func newTestController(ctx context.Context, kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) (*PersistentVolumeController, error) {
if informerFactory == nil { if informerFactory == nil {
informerFactory = informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) informerFactory = informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
} }
@ -235,7 +235,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
EventRecorder: record.NewFakeRecorder(1000), EventRecorder: record.NewFakeRecorder(1000),
EnableDynamicProvisioning: enableDynamicProvisioning, EnableDynamicProvisioning: enableDynamicProvisioning,
} }
ctrl, err := NewController(params) ctrl, err := NewController(ctx, params)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to construct persistentvolume controller: %v", err) return nil, fmt.Errorf("failed to construct persistentvolume controller: %v", err)
} }
@ -586,18 +586,18 @@ const operationDelete = "Delete"
const operationRecycle = "Recycle" const operationRecycle = "Recycle"
var ( var (
classGold string = "gold" classGold = "gold"
classSilver string = "silver" classSilver = "silver"
classCopper string = "copper" classCopper = "copper"
classEmpty string = "" classEmpty = ""
classNonExisting string = "non-existing" classNonExisting = "non-existing"
classExternal string = "external" classExternal = "external"
classExternalWait string = "external-wait" classExternalWait = "external-wait"
classUnknownInternal string = "unknown-internal" classUnknownInternal = "unknown-internal"
classUnsupportedMountOptions string = "unsupported-mountoptions" classUnsupportedMountOptions = "unsupported-mountoptions"
classLarge string = "large" classLarge = "large"
classWait string = "wait" classWait = "wait"
classCSI string = "csi" classCSI = "csi"
modeWait = storage.VolumeBindingWaitForFirstConsumer modeWait = storage.VolumeBindingWaitForFirstConsumer
) )
@ -670,13 +670,13 @@ func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall {
// injected function to simulate that something is happening when the // injected function to simulate that something is happening when the
// controller waits for the operation lock. Controller is then resumed and we // controller waits for the operation lock. Controller is then resumed and we
// check how it behaves. // check how it behaves.
func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall { func wrapTestWithInjectedOperation(ctx context.Context, toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall {
return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error { return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
// Inject a hook before async operation starts // Inject a hook before async operation starts
ctrl.preOperationHook = func(operationName string) { ctrl.preOperationHook = func(operationName string) {
// Inside the hook, run the function to inject // Inside the hook, run the function to inject
klog.V(4).Infof("reactor: scheduleOperation reached, injecting call") klog.FromContext(ctx).V(4).Info("Reactor: scheduleOperation reached, injecting call")
injectBeforeOperation(ctrl, reactor) injectBeforeOperation(ctrl, reactor)
} }
@ -700,7 +700,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c
} }
} }
func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest, t *testing.T) { func evaluateTestResults(ctx context.Context, ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest, t *testing.T) {
// Evaluate results // Evaluate results
if err := reactor.CheckClaims(test.expectedClaims); err != nil { if err := reactor.CheckClaims(test.expectedClaims); err != nil {
t.Errorf("Test %q: %v", test.name, err) t.Errorf("Test %q: %v", test.name, err)
@ -710,7 +710,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo
t.Errorf("Test %q: %v", test.name, err) t.Errorf("Test %q: %v", test.name, err)
} }
if err := checkEvents(t, test.expectedEvents, ctrl); err != nil { if err := checkEvents(t, ctx, test.expectedEvents, ctrl); err != nil {
t.Errorf("Test %q: %v", test.name, err) t.Errorf("Test %q: %v", test.name, err)
} }
} }
@ -721,15 +721,15 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo
// 2. Call the tested function (syncClaim/syncVolume) via // 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*. // controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims. // 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) { func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
doit := func(t *testing.T, test controllerTest) { doit := func(t *testing.T, test controllerTest) {
// Initialize the controller // Initialize the controller
client := &fake.Clientset{} client := &fake.Clientset{}
ctrl, err := newTestController(client, nil, true) ctrl, err := newTestController(ctx, client, nil, true)
if err != nil { if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err) t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
} }
reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) reactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)
for _, claim := range test.initialClaims { for _, claim := range test.initialClaims {
if metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) { if metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) {
continue continue
@ -771,7 +771,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
t.Errorf("Test %q failed: %v", test.name, err) t.Errorf("Test %q failed: %v", test.name, err)
} }
evaluateTestResults(ctrl, reactor.VolumeReactor, test, t) evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
} }
for _, test := range tests { for _, test := range tests {
@ -797,13 +797,14 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
// of volumes/claims with expected claims/volumes and report differences. // of volumes/claims with expected claims/volumes and report differences.
// //
// Some limit of calls is enforced to prevent endless loops. // Some limit of calls is enforced to prevent endless loops.
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) { func runMultisyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
logger := klog.FromContext(ctx)
run := func(t *testing.T, test controllerTest) { run := func(t *testing.T, test controllerTest) {
klog.V(4).Infof("starting multisync test %q", test.name) logger.V(4).Info("Starting multisync test", "testName", test.name)
// Initialize the controller // Initialize the controller
client := &fake.Clientset{} client := &fake.Clientset{}
ctrl, err := newTestController(client, nil, true) ctrl, err := newTestController(ctx, client, nil, true)
if err != nil { if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err) t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
} }
@ -815,7 +816,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
} }
ctrl.classLister = storagelisters.NewStorageClassLister(indexer) ctrl.classLister = storagelisters.NewStorageClassLister(indexer)
reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) reactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)
for _, claim := range test.initialClaims { for _, claim := range test.initialClaims {
ctrl.claims.Add(claim) ctrl.claims.Add(claim)
} }
@ -837,7 +838,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
counter := 0 counter := 0
for { for {
counter++ counter++
klog.V(4).Infof("test %q: iteration %d", test.name, counter) logger.V(4).Info("Test", "testName", test.name, "iteration", counter)
if counter > 100 { if counter > 100 {
t.Errorf("Test %q failed: too many iterations", test.name) t.Errorf("Test %q failed: too many iterations", test.name)
@ -847,7 +848,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
// Wait for all goroutines to finish // Wait for all goroutines to finish
reactor.waitForIdle() reactor.waitForIdle()
obj := reactor.PopChange() obj := reactor.PopChange(ctx)
if obj == nil { if obj == nil {
// Nothing was changed, should we exit? // Nothing was changed, should we exit?
if firstSync || reactor.GetChangeCount() > 0 { if firstSync || reactor.GetChangeCount() > 0 {
@ -855,7 +856,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
// Simulate "periodic sync" of everything (until it produces // Simulate "periodic sync" of everything (until it produces
// no changes). // no changes).
firstSync = false firstSync = false
klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name) logger.V(4).Info("Test simulating periodical sync of all claims and volumes", "testName", test.name)
reactor.SyncAll() reactor.SyncAll()
} else { } else {
// Last sync did not produce any updates, the test reached // Last sync did not produce any updates, the test reached
@ -876,7 +877,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil { if err != nil {
if err == pvtesting.ErrVersionConflict { if err == pvtesting.ErrVersionConflict {
// Ignore version errors // Ignore version errors
klog.V(4).Infof("test intentionally ignores version error.") logger.V(4).Info("Test intentionally ignores version error")
} else { } else {
t.Errorf("Error calling syncClaim: %v", err) t.Errorf("Error calling syncClaim: %v", err)
// Finish the loop on the first error // Finish the loop on the first error
@ -893,7 +894,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil { if err != nil {
if err == pvtesting.ErrVersionConflict { if err == pvtesting.ErrVersionConflict {
// Ignore version errors // Ignore version errors
klog.V(4).Infof("test intentionally ignores version error.") logger.V(4).Info("Test intentionally ignores version error")
} else { } else {
t.Errorf("Error calling syncVolume: %v", err) t.Errorf("Error calling syncVolume: %v", err)
// Finish the loop on the first error // Finish the loop on the first error
@ -904,8 +905,8 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
continue continue
} }
} }
evaluateTestResults(ctrl, reactor.VolumeReactor, test, t) evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
klog.V(4).Infof("test %q finished after %d iterations", test.name, counter) logger.V(4).Info("Test finished after iterations", "testName", test.name, "iterations", counter)
} }
for _, test := range tests { for _, test := range tests {
@ -985,10 +986,10 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol
// Provisioner interfaces // Provisioner interfaces
func (plugin *mockVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { func (plugin *mockVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
if len(plugin.provisionCalls) > 0 { if len(plugin.provisionCalls) > 0 {
// mockVolumePlugin directly implements Provisioner interface // mockVolumePlugin directly implements Provisioner interface
klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner") logger.V(4).Info("Mock plugin NewProvisioner called, returning mock provisioner")
plugin.provisionOptions = options plugin.provisionOptions = options
return plugin, nil return plugin, nil
} else { } else {
@ -1000,11 +1001,10 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
if len(plugin.provisionCalls) <= plugin.provisionCallCounter { if len(plugin.provisionCalls) <= plugin.provisionCallCounter {
return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter)
} }
var pv *v1.PersistentVolume var pv *v1.PersistentVolume
call := plugin.provisionCalls[plugin.provisionCallCounter] call := plugin.provisionCalls[plugin.provisionCallCounter]
if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) { if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters) klog.TODO().Error(nil, "Invalid provisioner call", "gotOptions", plugin.provisionOptions.Parameters, "expectedOptions", call.expectedParameters)
return nil, fmt.Errorf("Mock plugin error: invalid provisioner call") return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
} }
if call.ret == nil { if call.ret == nil {
@ -1033,16 +1033,16 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
} }
plugin.provisionCallCounter++ plugin.provisionCallCounter++
klog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret) klog.TODO().V(4).Info("Mock plugin Provision call nr", "provisionCallCounter", plugin.provisionCallCounter, "pv", klog.KObj(pv), "err", call.ret)
return pv, call.ret return pv, call.ret
} }
// Deleter interfaces // Deleter interfaces
func (plugin *mockVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { func (plugin *mockVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
if len(plugin.deleteCalls) > 0 { if len(plugin.deleteCalls) > 0 {
// mockVolumePlugin directly implements Deleter interface // mockVolumePlugin directly implements Deleter interface
klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter") logger.V(4).Info("Mock plugin NewDeleter called, returning mock deleter")
return plugin, nil return plugin, nil
} else { } else {
return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured") return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured")
@ -1055,7 +1055,7 @@ func (plugin *mockVolumePlugin) Delete() error {
} }
ret := plugin.deleteCalls[plugin.deleteCallCounter] ret := plugin.deleteCalls[plugin.deleteCallCounter]
plugin.deleteCallCounter++ plugin.deleteCallCounter++
klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret) klog.TODO().V(4).Info("Mock plugin Delete call nr", "deleteCallCounter", plugin.deleteCallCounter, "err", ret)
return ret return ret
} }
@ -1081,6 +1081,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventR
} }
ret := plugin.recycleCalls[plugin.recycleCallCounter] ret := plugin.recycleCalls[plugin.recycleCallCounter]
plugin.recycleCallCounter++ plugin.recycleCallCounter++
klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret) klog.TODO().V(4).Info("Mock plugin Recycle call nr", "recycleCallCounter", plugin.recycleCallCounter, "err", ret)
return ret return ret
} }
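The mock plugin methods above carry neither a context nor a logger parameter, so the rewritten calls fall back to klog.TODO(), the marker logger for code that has not been converted yet. A minimal, hypothetical sketch of that fallback:

package example

import (
    "k8s.io/klog/v2"
)

// recordCall is a hypothetical helper without a logger parameter; klog.TODO()
// flags it as a spot where a real contextual logger should eventually arrive.
func recordCall(callNr int, ret error) error {
    klog.TODO().V(4).Info("Mock plugin call", "callCounter", callNr, "err", ret)
    return ret
}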

View File

@ -17,11 +17,11 @@ limitations under the License.
package persistentvolume package persistentvolume
import ( import (
"context"
"errors" "errors"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"testing" "testing"
@ -173,6 +173,7 @@ var provision2Success = provisionCall{
func TestProvisionSync(t *testing.T) { func TestProvisionSync(t *testing.T) {
// Default enable the HonorPVReclaimPolicy feature gate. // Default enable the HonorPVReclaimPolicy feature gate.
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)()
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{ tests := []controllerTest{
{ {
// Provision a volume (with a default class) // Provision a volume (with a default class)
@ -243,7 +244,7 @@ func TestProvisionSync(t *testing.T) {
expectedClaims: newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, &classGold, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner), expectedClaims: newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, &classGold, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner),
expectedEvents: noevents, expectedEvents: noevents,
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Create a volume before provisionClaimOperation starts. // Create a volume before provisionClaimOperation starts.
// This simulates a parallel controller provisioning the volume. // This simulates a parallel controller provisioning the volume.
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, volume.AnnBoundByController, volume.AnnDynamicallyProvisioned) volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, volume.AnnBoundByController, volume.AnnDynamicallyProvisioned)
@ -528,7 +529,7 @@ func TestProvisionSync(t *testing.T) {
newClaimArray("claim11-23", "uid11-23", "1Gi", "", v1.ClaimPending, &classCopper, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner)), newClaimArray("claim11-23", "uid11-23", "1Gi", "", v1.ClaimPending, &classCopper, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner)),
[]string{"Normal ProvisioningSucceeded"}, []string{"Normal ProvisioningSucceeded"},
noerrors, noerrors,
wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
nodesIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) nodesIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}} node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}
@ -578,7 +579,7 @@ func TestProvisionSync(t *testing.T) {
wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
}, },
} }
runSyncTests(t, tests, storageClasses, []*v1.Pod{}) runSyncTests(t, ctx, tests, storageClasses, []*v1.Pod{})
} }
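The _, ctx := ktesting.NewTestContext(t) call above gives each test a context whose logger writes through t.Log, so per-test output stays attributed to the test. A self-contained sketch of that wiring, with a hypothetical function under test:

package example

import (
    "context"
    "testing"

    "k8s.io/klog/v2"
    "k8s.io/klog/v2/ktesting"
)

// doWork is a hypothetical function under test that logs contextually.
func doWork(ctx context.Context) {
    klog.FromContext(ctx).V(4).Info("Doing work")
}

func TestContextualLogging(t *testing.T) {
    // The returned logger is backed by t.Log; the context carries the same
    // logger to the code under test.
    logger, ctx := ktesting.NewTestContext(t)
    logger.V(4).Info("Starting test", "testName", t.Name())
    doWork(ctx)
}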
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -597,6 +598,7 @@ func TestProvisionSync(t *testing.T) {
// //
// Some limit of calls is enforced to prevent endless loops. // Some limit of calls is enforced to prevent endless loops.
func TestProvisionMultiSync(t *testing.T) { func TestProvisionMultiSync(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{ tests := []controllerTest{
{ {
// Provision a volume with binding // Provision a volume with binding
@ -620,7 +622,7 @@ func TestProvisionMultiSync(t *testing.T) {
newClaimArray("claim12-2", "uid12-2", "1Gi", "pvc-uid12-2", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))), newClaimArray("claim12-2", "uid12-2", "1Gi", "pvc-uid12-2", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))),
expectedEvents: []string{"Normal ExternalProvisioning"}, expectedEvents: []string{"Normal ExternalProvisioning"},
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Create a volume before syncClaim tries to bind a PV to PVC // Create a volume before syncClaim tries to bind a PV to PVC
// This simulates external provisioner creating a volume while the controller // This simulates external provisioner creating a volume while the controller
// is waiting for a volume to bind to the existing claim // is waiting for a volume to bind to the existing claim
@ -659,7 +661,7 @@ func TestProvisionMultiSync(t *testing.T) {
newClaimArray("claim12-4", "uid12-4", "1Gi", "pvc-uid12-4", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))), newClaimArray("claim12-4", "uid12-4", "1Gi", "pvc-uid12-4", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))),
expectedEvents: []string{"Normal ExternalProvisioning"}, expectedEvents: []string{"Normal ExternalProvisioning"},
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Create a volume before syncClaim tries to bind a PV to PVC // Create a volume before syncClaim tries to bind a PV to PVC
// This simulates external provisioner creating a volume while the controller // This simulates external provisioner creating a volume while the controller
// is waiting for a volume to bind to the existing claim // is waiting for a volume to bind to the existing claim
@ -676,16 +678,17 @@ func TestProvisionMultiSync(t *testing.T) {
}, },
} }
runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name) runMultisyncTests(t, ctx, tests, storageClasses, storageClasses[0].Name)
} }
// When provisioning is disabled, provisioning a claim should instantly return nil // When provisioning is disabled, provisioning a claim should instantly return nil
func TestDisablingDynamicProvisioner(t *testing.T) { func TestDisablingDynamicProvisioner(t *testing.T) {
ctrl, err := newTestController(nil, nil, false) _, ctx := ktesting.NewTestContext(t)
ctrl, err := newTestController(ctx, nil, nil, false)
if err != nil { if err != nil {
t.Fatalf("Construct PersistentVolume controller failed: %v", err) t.Fatalf("Construct PersistentVolume controller failed: %v", err)
} }
retVal := ctrl.provisionClaim(context.TODO(), nil) retVal := ctrl.provisionClaim(ctx, nil)
if retVal != nil { if retVal != nil {
t.Errorf("Expected nil return but got %v", retVal) t.Errorf("Expected nil return but got %v", retVal)
} }
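TestDisablingDynamicProvisioner now threads the ktesting context through the constructor and into provisionClaim instead of reaching for context.TODO(). A sketch of the same wiring with stand-in types (the controller below is hypothetical):

package example

import (
    "context"
    "testing"

    "k8s.io/klog/v2"
    "k8s.io/klog/v2/ktesting"
)

// fakeController stands in for a controller whose constructor and methods
// accept a context for logging.
type fakeController struct{}

func newFakeController(ctx context.Context) (*fakeController, error) {
    klog.FromContext(ctx).V(4).Info("Constructing fake controller")
    return &fakeController{}, nil
}

func (c *fakeController) provisionClaim(ctx context.Context) error {
    klog.FromContext(ctx).V(4).Info("provisionClaim called")
    return nil
}

func TestFakeController(t *testing.T) {
    _, ctx := ktesting.NewTestContext(t)
    c, err := newFakeController(ctx)
    if err != nil {
        t.Fatalf("construct controller failed: %v", err)
    }
    if err := c.provisionClaim(ctx); err != nil {
        t.Errorf("expected nil return but got %v", err)
    }
}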

File diff suppressed because it is too large

View File

@ -79,7 +79,7 @@ type ControllerParameters struct {
} }
// NewController creates a new PersistentVolume controller // NewController creates a new PersistentVolume controller
func NewController(p ControllerParameters) (*PersistentVolumeController, error) { func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) {
eventRecorder := p.EventRecorder eventRecorder := p.EventRecorder
var eventBroadcaster record.EventBroadcaster var eventBroadcaster record.EventBroadcaster
if eventRecorder == nil { if eventRecorder == nil {
@ -112,9 +112,9 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
p.VolumeInformer.Informer().AddEventHandler( p.VolumeInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) }, AddFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) }, UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) }, DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
}, },
) )
controller.volumeLister = p.VolumeInformer.Lister() controller.volumeLister = p.VolumeInformer.Lister()
@ -122,9 +122,9 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
p.ClaimInformer.Informer().AddEventHandler( p.ClaimInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) }, AddFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) }, UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) }, DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
}, },
) )
controller.claimLister = p.ClaimInformer.Lister() controller.claimLister = p.ClaimInformer.Lister()
@ -156,53 +156,54 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
// initializeCaches fills all controller caches with initial data from etcd in // initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when first addClaim/addVolume to // order to have the caches already filled when first addClaim/addVolume to
// perform initial synchronization of the controller. // perform initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) { func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
volumeList, err := volumeLister.List(labels.Everything()) volumeList, err := volumeLister.List(labels.Everything())
if err != nil { if err != nil {
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) logger.Error(err, "PersistentVolumeController can't initialize caches")
return return
} }
for _, volume := range volumeList { for _, volume := range volumeList {
volumeClone := volume.DeepCopy() volumeClone := volume.DeepCopy()
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil { if _, err = ctrl.storeVolumeUpdate(logger, volumeClone); err != nil {
klog.Errorf("error updating volume cache: %v", err) logger.Error(err, "Error updating volume cache")
} }
} }
claimList, err := claimLister.List(labels.Everything()) claimList, err := claimLister.List(labels.Everything())
if err != nil { if err != nil {
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) logger.Error(err, "PersistentVolumeController can't initialize caches")
return return
} }
for _, claim := range claimList { for _, claim := range claimList {
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil { if _, err = ctrl.storeClaimUpdate(logger, claim.DeepCopy()); err != nil {
klog.Errorf("error updating claim cache: %v", err) logger.Error(err, "Error updating claim cache")
} }
} }
klog.V(4).Infof("controller initialized") logger.V(4).Info("Controller initialized")
} }
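In initializeCaches the format-string calls become structured ones: the error value goes first to logger.Error and everything else becomes key/value pairs. A small hypothetical illustration of the two call shapes:

package example

import (
    "errors"

    "k8s.io/klog/v2"
)

// fillCache is a hypothetical loop showing the structured call shapes:
// Error takes the error value first, Info takes only key/value pairs.
func fillCache(logger klog.Logger, names []string) {
    for _, name := range names {
        if name == "" {
            logger.Error(errors.New("empty object name"), "Error updating cache", "objName", name)
            continue
        }
        logger.V(4).Info("Cache updated", "objName", name)
    }
    logger.V(4).Info("Controller initialized")
}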
// enqueueWork adds volume or claim to given work queue. // enqueueWork adds volume or claim to given work queue.
func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, obj interface{}) { func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.Interface, obj interface{}) {
// Beware of "xxx deleted" events // Beware of "xxx deleted" events
logger := klog.FromContext(ctx)
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj obj = unknown.Obj
} }
objName, err := controller.KeyFunc(obj) objName, err := controller.KeyFunc(obj)
if err != nil { if err != nil {
klog.Errorf("failed to get key from object: %v", err) logger.Error(err, "Failed to get key from object")
return return
} }
klog.V(5).Infof("enqueued %q for sync", objName) logger.V(5).Info("Enqueued for sync", "objName", objName)
queue.Add(objName) queue.Add(objName)
} }
func (ctrl *PersistentVolumeController) storeVolumeUpdate(volume interface{}) (bool, error) { func (ctrl *PersistentVolumeController) storeVolumeUpdate(logger klog.Logger, volume interface{}) (bool, error) {
return storeObjectUpdate(ctrl.volumes.store, volume, "volume") return storeObjectUpdate(logger, ctrl.volumes.store, volume, "volume")
} }
func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (bool, error) { func (ctrl *PersistentVolumeController) storeClaimUpdate(logger klog.Logger, claim interface{}) (bool, error) {
return storeObjectUpdate(ctrl.claims, claim, "claim") return storeObjectUpdate(logger, ctrl.claims, claim, "claim")
} }
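Helpers like storeVolumeUpdate and storeClaimUpdate need no cancellation, so they take a klog.Logger directly rather than a context. A hypothetical sketch of that parameter convention:

package example

import (
    "fmt"

    "k8s.io/klog/v2"
)

// storeUpdate is a hypothetical cache helper: it only needs a logger, so the
// logger is passed as the first parameter instead of a full context.
func storeUpdate(logger klog.Logger, kind, objName string) (bool, error) {
    if objName == "" {
        return false, fmt.Errorf("couldn't get key for %s object", kind)
    }
    logger.V(4).Info("storeUpdate: adding obj", "kind", kind, "objName", objName)
    return true, nil
}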
// updateVolume runs in worker thread and handles "volume added", // updateVolume runs in worker thread and handles "volume added",
@ -210,9 +211,10 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (boo
func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) { func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) {
// Store the new volume version in the cache and do not process it if this // Store the new volume version in the cache and do not process it if this
// is an old version. // is an old version.
new, err := ctrl.storeVolumeUpdate(volume) logger := klog.FromContext(ctx)
new, err := ctrl.storeVolumeUpdate(logger, volume)
if err != nil { if err != nil {
klog.Errorf("%v", err) logger.Error(err, "")
} }
if !new { if !new {
return return
@ -223,19 +225,20 @@ func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume
if errors.IsConflict(err) { if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller // Version conflict error happens quite often and the controller
// recovers from it easily. // recovers from it easily.
klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err) logger.V(3).Info("Could not sync volume", "volumeName", volume.Name, "err", err)
} else { } else {
klog.Errorf("could not sync volume %q: %+v", volume.Name, err) logger.Error(err, "Could not sync volume", "volumeName", volume.Name, "err", err)
} }
} }
} }
// deleteVolume runs in worker thread and handles "volume deleted" event. // deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) { func (ctrl *PersistentVolumeController) deleteVolume(ctx context.Context, volume *v1.PersistentVolume) {
logger := klog.FromContext(ctx)
if err := ctrl.volumes.store.Delete(volume); err != nil { if err := ctrl.volumes.store.Delete(volume); err != nil {
klog.Errorf("volume %q deletion encountered : %v", volume.Name, err) logger.Error(err, "Volume deletion encountered", "volumeName", volume.Name)
} else { } else {
klog.V(4).Infof("volume %q deleted", volume.Name) logger.V(4).Info("volume deleted", "volumeName", volume.Name)
} }
// record deletion metric if a deletion start timestamp is in the cache // record deletion metric if a deletion start timestamp is in the cache
// the following calls will be a no-op if there is nothing for this volume in the cache // the following calls will be a no-op if there is nothing for this volume in the cache
@ -249,7 +252,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
// claim here in response to volume deletion prevents the claim from // claim here in response to volume deletion prevents the claim from
// waiting until the next sync period for its Lost status. // waiting until the next sync period for its Lost status.
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef) claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey) logger.V(5).Info("deleteVolume: scheduling sync of claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name)
ctrl.claimQueue.Add(claimKey) ctrl.claimQueue.Add(claimKey)
} }
@ -258,9 +261,10 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) { func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
// Store the new claim version in the cache and do not process it if this is // Store the new claim version in the cache and do not process it if this is
// an old version. // an old version.
new, err := ctrl.storeClaimUpdate(claim) logger := klog.FromContext(ctx)
new, err := ctrl.storeClaimUpdate(logger, claim)
if err != nil { if err != nil {
klog.Errorf("%v", err) logger.Error(err, "")
} }
if !new { if !new {
return return
@ -270,35 +274,36 @@ func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *
if errors.IsConflict(err) { if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller // Version conflict error happens quite often and the controller
// recovers from it easily. // recovers from it easily.
klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err) logger.V(3).Info("Could not sync claim", "PVC", klog.KObj(claim), "err", err)
} else { } else {
klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err) logger.Error(err, "Could not sync volume", "PVC", klog.KObj(claim))
} }
} }
} }
// Unit test [5-5] [5-6] [5-7] // Unit test [5-5] [5-6] [5-7]
// deleteClaim runs in worker thread and handles "claim deleted" event. // deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) { func (ctrl *PersistentVolumeController) deleteClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
logger := klog.FromContext(ctx)
if err := ctrl.claims.Delete(claim); err != nil { if err := ctrl.claims.Delete(claim); err != nil {
klog.Errorf("claim %q deletion encountered : %v", claim.Name, err) logger.Error(err, "Claim deletion encountered", "PVC", klog.KObj(claim))
} }
claimKey := claimToClaimKey(claim) claimKey := claimToClaimKey(claim)
klog.V(4).Infof("claim %q deleted", claimKey) logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim))
// clean any possible unfinished provision start timestamp from cache // clean any possible unfinished provision start timestamp from cache
// Unit test [5-8] [5-9] // Unit test [5-8] [5-9]
ctrl.operationTimestamps.Delete(claimKey) ctrl.operationTimestamps.Delete(claimKey)
volumeName := claim.Spec.VolumeName volumeName := claim.Spec.VolumeName
if volumeName == "" { if volumeName == "" {
klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimKey) logger.V(5).Info("deleteClaim: volume not bound", "PVC", klog.KObj(claim))
return return
} }
// sync the volume when its claim is deleted. Explicitly sync'ing the // sync the volume when its claim is deleted. Explicitly sync'ing the
// volume here in response to claim deletion prevents the volume from // volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release. // waiting until the next sync period for its Release.
klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimKey, volumeName) logger.V(5).Info("deleteClaim: scheduling sync of volume", "PVC", klog.KObj(claim), "volumeName", volumeName)
ctrl.volumeQueue.Add(volumeName) ctrl.volumeQueue.Add(volumeName)
} }
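deleteVolume and deleteClaim switch from hand-formatted %q strings to klog.KObj and klog.KRef, which emit objects as namespace/name values. A minimal hypothetical helper using both:

package example

import (
    v1 "k8s.io/api/core/v1"

    "k8s.io/klog/v2"
)

// logClaimDeleted is a hypothetical helper: KObj renders an object it holds,
// KRef renders a namespace/name pair when only the reference is available.
func logClaimDeleted(logger klog.Logger, claim *v1.PersistentVolumeClaim, volumeName string) {
    logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim))
    logger.V(5).Info("Scheduling sync of volume",
        "PVC", klog.KRef(claim.Namespace, claim.Name), "volumeName", volumeName)
}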
@ -314,17 +319,17 @@ func (ctrl *PersistentVolumeController) Run(ctx context.Context) {
ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")}) ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")})
defer ctrl.eventBroadcaster.Shutdown() defer ctrl.eventBroadcaster.Shutdown()
} }
logger := klog.FromContext(ctx)
klog.Infof("Starting persistent volume controller") logger.Info("Starting persistent volume controller")
defer klog.Infof("Shutting down persistent volume controller") defer logger.Info("Shutting down persistent volume controller")
if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) { if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
return return
} }
ctrl.initializeCaches(ctrl.volumeLister, ctrl.claimLister) ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister)
go wait.Until(ctrl.resync, ctrl.resyncPeriod, ctx.Done()) go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done())
go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second) go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second)
go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second) go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second)
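Run pulls the logger out of the context once and, because wait.Until only accepts a plain func(), wraps the now context-aware resync in a closure while the workers move to wait.UntilWithContext. A condensed sketch of that pattern, with hypothetical callbacks:

package example

import (
    "context"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/klog/v2"
)

// run is a hypothetical controller loop: resync needs a context but wait.Until
// does not pass one, hence the closure; workers use wait.UntilWithContext.
func run(ctx context.Context, resync, worker func(context.Context)) {
    logger := klog.FromContext(ctx)
    logger.Info("Starting controller")
    defer logger.Info("Shutting down controller")

    go wait.Until(func() { resync(ctx) }, 15*time.Second, ctx.Done())
    go wait.UntilWithContext(ctx, worker, time.Second)

    <-ctx.Done()
}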
@ -342,7 +347,8 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont
// when no modifications are required this function could sometimes return a // when no modifications are required this function could sometimes return a
// copy of the volume and sometimes return a ref to the original // copy of the volume and sometimes return a ref to the original
claimClone := claim.DeepCopy() claimClone := claim.DeepCopy()
modified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true) logger := klog.FromContext(ctx)
modified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
if !modified { if !modified {
return claimClone, nil return claimClone, nil
} }
@ -350,7 +356,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont
if err != nil { if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
} }
_, err = ctrl.storeClaimUpdate(newClaim) _, err = ctrl.storeClaimUpdate(logger, newClaim)
if err != nil { if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
} }
@ -360,8 +366,9 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont
func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinalizers(ctx context.Context, func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinalizers(ctx context.Context,
volume *v1.PersistentVolume) (*v1.PersistentVolume, error) { volume *v1.PersistentVolume) (*v1.PersistentVolume, error) {
volumeClone := volume.DeepCopy() volumeClone := volume.DeepCopy()
annModified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false) logger := klog.FromContext(ctx)
modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(ctrl.csiMigratedPluginManager, volumeClone) annModified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false)
modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(logger, ctrl.csiMigratedPluginManager, volumeClone)
if !annModified && !finalizersModified { if !annModified && !finalizersModified {
return volumeClone, nil return volumeClone, nil
} }
@ -372,7 +379,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinal
if err != nil { if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err) return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err)
} }
_, err = ctrl.storeVolumeUpdate(newVol) _, err = ctrl.storeVolumeUpdate(logger, newVol)
if err != nil { if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err) return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err)
} }
@ -385,7 +392,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinal
// `Recycle`, removing the finalizer is necessary to reflect the reclaimPolicy updates on the PV. // `Recycle`, removing the finalizer is necessary to reflect the reclaimPolicy updates on the PV.
// The method also removes any external PV Deletion Protection finalizers added on the PV, this represents CSI migration // The method also removes any external PV Deletion Protection finalizers added on the PV, this represents CSI migration
// rollback/disable scenarios. // rollback/disable scenarios.
func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) { func modifyDeletionFinalizers(logger klog.Logger, cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) {
modified := false modified := false
var outFinalizers []string var outFinalizers []string
if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) { if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) {
@ -416,18 +423,18 @@ func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.Persiste
reclaimPolicy := volume.Spec.PersistentVolumeReclaimPolicy reclaimPolicy := volume.Spec.PersistentVolumeReclaimPolicy
// Add back the in-tree PV deletion protection finalizer if it does not already exist // Add back the in-tree PV deletion protection finalizer if it does not already exist
if reclaimPolicy == v1.PersistentVolumeReclaimDelete && !slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) { if reclaimPolicy == v1.PersistentVolumeReclaimDelete && !slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
klog.V(4).Infof("Adding in-tree pv deletion protection finalizer on %s", volume.Name) logger.V(4).Info("Adding in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
outFinalizers = append(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer) outFinalizers = append(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer)
modified = true modified = true
} else if (reclaimPolicy == v1.PersistentVolumeReclaimRetain || reclaimPolicy == v1.PersistentVolumeReclaimRecycle) && slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) { } else if (reclaimPolicy == v1.PersistentVolumeReclaimRetain || reclaimPolicy == v1.PersistentVolumeReclaimRecycle) && slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
// Remove the in-tree PV deletion protection finalizer if the reclaim policy is 'Retain' or 'Recycle' // Remove the in-tree PV deletion protection finalizer if the reclaim policy is 'Retain' or 'Recycle'
klog.V(4).Infof("Removing in-tree pv deletion protection finalizer on %s", volume.Name) logger.V(4).Info("Removing in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil)
modified = true modified = true
} }
// Remove the external PV deletion protection finalizer // Remove the external PV deletion protection finalizer
if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) { if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) {
klog.V(4).Infof("Removing external pv deletion protection finalizer on %s", volume.Name) logger.V(4).Info("Removing external pv deletion protection finalizer on volume", "volumeName", volume.Name)
outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil)
modified = true modified = true
} }
@ -440,7 +447,7 @@ func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.Persiste
// driver name for that provisioner is "on" based on feature flags, it will also // driver name for that provisioner is "on" based on feature flags, it will also
// remove the annotation if migration is "off" for that provisioner in rollback // remove the annotation if migration is "off" for that provisioner in rollback
// scenarios. Returns true if the annotations map was modified and false otherwise. // scenarios. Returns true if the annotations map was modified and false otherwise.
func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool { func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool {
var csiDriverName string var csiDriverName string
var err error var err error
@ -473,7 +480,7 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam
if cmpm.IsMigrationEnabledForPlugin(provisioner) { if cmpm.IsMigrationEnabledForPlugin(provisioner) {
csiDriverName, err = translator.GetCSINameFromInTreeName(provisioner) csiDriverName, err = translator.GetCSINameFromInTreeName(provisioner)
if err != nil { if err != nil {
klog.Errorf("Could not update volume migration annotations. Migration enabled for plugin %s but could not find corresponding driver name: %v", provisioner, err) logger.Error(err, "Could not update volume migration annotations. Migration enabled for plugin but could not find corresponding driver name", "plugin", provisioner)
return false return false
} }
if migratedToDriver != csiDriverName { if migratedToDriver != csiDriverName {
@ -493,6 +500,7 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam
// volumeWorker processes items from volumeQueue. It must run only once, // volumeWorker processes items from volumeQueue. It must run only once,
// syncVolume is not assured to be reentrant. // syncVolume is not assured to be reentrant.
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
logger := klog.FromContext(ctx)
workFunc := func(ctx context.Context) bool { workFunc := func(ctx context.Context) bool {
keyObj, quit := ctrl.volumeQueue.Get() keyObj, quit := ctrl.volumeQueue.Get()
if quit { if quit {
@ -500,11 +508,11 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
} }
defer ctrl.volumeQueue.Done(keyObj) defer ctrl.volumeQueue.Done(keyObj)
key := keyObj.(string) key := keyObj.(string)
klog.V(5).Infof("volumeWorker[%s]", key) logger.V(5).Info("volumeWorker", "volumeKey", key)
_, name, err := cache.SplitMetaNamespaceKey(key) _, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {
klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err) logger.V(4).Info("Error getting name of volume to get volume from informer", "volumeKey", key, "err", err)
return false return false
} }
volume, err := ctrl.volumeLister.Get(name) volume, err := ctrl.volumeLister.Get(name)
@ -515,7 +523,7 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
return false return false
} }
if !errors.IsNotFound(err) { if !errors.IsNotFound(err) {
klog.V(2).Infof("error getting volume %q from informer: %v", key, err) logger.V(2).Info("Error getting volume from informer", "volumeKey", key, "err", err)
return false return false
} }
@ -523,26 +531,26 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
// "delete" // "delete"
volumeObj, found, err := ctrl.volumes.store.GetByKey(key) volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
if err != nil { if err != nil {
klog.V(2).Infof("error getting volume %q from cache: %v", key, err) logger.V(2).Info("Error getting volume from cache", "volumeKey", key, "err", err)
return false return false
} }
if !found { if !found {
// The controller has already processed the delete event and // The controller has already processed the delete event and
// deleted the volume from its cache // deleted the volume from its cache
klog.V(2).Infof("deletion of volume %q was already processed", key) logger.V(2).Info("Deletion of volume was already processed", "volumeKey", key)
return false return false
} }
volume, ok := volumeObj.(*v1.PersistentVolume) volume, ok := volumeObj.(*v1.PersistentVolume)
if !ok { if !ok {
klog.Errorf("expected volume, got %+v", volumeObj) logger.Error(nil, "Expected volume, got", "obj", volumeObj)
return false return false
} }
ctrl.deleteVolume(volume) ctrl.deleteVolume(ctx, volume)
return false return false
} }
for { for {
if quit := workFunc(ctx); quit { if quit := workFunc(ctx); quit {
klog.Infof("volume worker queue shutting down") logger.Info("Volume worker queue shutting down")
return return
} }
} }
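volumeWorker derives its logger once from the worker context; when a message deserves error severity but there is no error value (the unexpected-type branch above), nil is passed as Error's first argument. A small hypothetical sketch of that branch:

package example

import (
    v1 "k8s.io/api/core/v1"

    "k8s.io/klog/v2"
)

// asVolume is a hypothetical type check: Error with a nil error still logs at
// error severity, keeping the unexpected object as a structured value.
func asVolume(logger klog.Logger, obj interface{}) (*v1.PersistentVolume, bool) {
    volume, ok := obj.(*v1.PersistentVolume)
    if !ok {
        logger.Error(nil, "Expected volume, got", "obj", obj)
        return nil, false
    }
    return volume, true
}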
@ -551,6 +559,7 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
// claimWorker processes items from claimQueue. It must run only once, // claimWorker processes items from claimQueue. It must run only once,
// syncClaim is not reentrant. // syncClaim is not reentrant.
func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) { func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
logger := klog.FromContext(ctx)
workFunc := func() bool { workFunc := func() bool {
keyObj, quit := ctrl.claimQueue.Get() keyObj, quit := ctrl.claimQueue.Get()
if quit { if quit {
@ -558,11 +567,11 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
} }
defer ctrl.claimQueue.Done(keyObj) defer ctrl.claimQueue.Done(keyObj)
key := keyObj.(string) key := keyObj.(string)
klog.V(5).Infof("claimWorker[%s]", key) logger.V(5).Info("claimWorker", "claimKey", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key) namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil { if err != nil {
klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err) logger.V(4).Info("Error getting namespace & name of claim to get claim from informer", "claimKey", key, "err", err)
return false return false
} }
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name) claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
@ -573,33 +582,33 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
return false return false
} }
if !errors.IsNotFound(err) { if !errors.IsNotFound(err) {
klog.V(2).Infof("error getting claim %q from informer: %v", key, err) logger.V(2).Info("Error getting claim from informer", "claimKey", key, "err", err)
return false return false
} }
// The claim is not in informer cache, the event must have been "delete" // The claim is not in informer cache, the event must have been "delete"
claimObj, found, err := ctrl.claims.GetByKey(key) claimObj, found, err := ctrl.claims.GetByKey(key)
if err != nil { if err != nil {
klog.V(2).Infof("error getting claim %q from cache: %v", key, err) logger.V(2).Info("Error getting claim from cache", "claimKey", key, "err", err)
return false return false
} }
if !found { if !found {
// The controller has already processed the delete event and // The controller has already processed the delete event and
// deleted the claim from its cache // deleted the claim from its cache
klog.V(2).Infof("deletion of claim %q was already processed", key) logger.V(2).Info("Deletion of claim was already processed", "claimKey", key)
return false return false
} }
claim, ok := claimObj.(*v1.PersistentVolumeClaim) claim, ok := claimObj.(*v1.PersistentVolumeClaim)
if !ok { if !ok {
klog.Errorf("expected claim, got %+v", claimObj) logger.Error(nil, "Expected claim, got", "obj", claimObj)
return false return false
} }
ctrl.deleteClaim(claim) ctrl.deleteClaim(ctx, claim)
return false return false
} }
for { for {
if quit := workFunc(); quit { if quit := workFunc(); quit {
klog.Infof("claim worker queue shutting down") logger.Info("Claim worker queue shutting down")
return return
} }
} }
@ -608,25 +617,26 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
// resync supplements short resync period of shared informers - we don't want // resync supplements short resync period of shared informers - we don't want
// all consumers of PV/PVC shared informer to have a short resync period, // all consumers of PV/PVC shared informer to have a short resync period,
// therefore we do our own. // therefore we do our own.
func (ctrl *PersistentVolumeController) resync() { func (ctrl *PersistentVolumeController) resync(ctx context.Context) {
klog.V(4).Infof("resyncing PV controller") logger := klog.FromContext(ctx)
logger.V(4).Info("Resyncing PV controller")
pvcs, err := ctrl.claimLister.List(labels.NewSelector()) pvcs, err := ctrl.claimLister.List(labels.NewSelector())
if err != nil { if err != nil {
klog.Warningf("cannot list claims: %s", err) logger.Info("Cannot list claims", "err", err)
return return
} }
for _, pvc := range pvcs { for _, pvc := range pvcs {
ctrl.enqueueWork(ctrl.claimQueue, pvc) ctrl.enqueueWork(ctx, ctrl.claimQueue, pvc)
} }
pvs, err := ctrl.volumeLister.List(labels.NewSelector()) pvs, err := ctrl.volumeLister.List(labels.NewSelector())
if err != nil { if err != nil {
klog.Warningf("cannot list persistent volumes: %s", err) logger.Info("Cannot list persistent volumes", "err", err)
return return
} }
for _, pv := range pvs { for _, pv := range pvs {
ctrl.enqueueWork(ctrl.volumeQueue, pv) ctrl.enqueueWork(ctx, ctrl.volumeQueue, pv)
} }
} }
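resync also replaces klog.Warningf: logr has no warning level, so former warnings become Info calls that carry the error as an err key. A hypothetical condensed version:

package example

import (
    "context"

    "k8s.io/klog/v2"
)

// resyncAll is a hypothetical periodic sync: a listing failure is logged at
// info level with an "err" key, since logr has no separate warning severity.
func resyncAll(ctx context.Context, list func() ([]string, error), enqueue func(context.Context, string)) {
    logger := klog.FromContext(ctx)
    logger.V(4).Info("Resyncing controller")
    items, err := list()
    if err != nil {
        logger.Info("Cannot list objects", "err", err)
        return
    }
    for _, item := range items {
        enqueue(ctx, item)
    }
}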
@ -642,14 +652,15 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(ctx context.Context,
// modify these, therefore create a copy. // modify these, therefore create a copy.
claimClone := claim.DeepCopy() claimClone := claim.DeepCopy()
// TODO: remove the beta storage provisioner anno after the deprecation period // TODO: remove the beta storage provisioner anno after the deprecation period
logger := klog.FromContext(ctx)
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnBetaStorageProvisioner, provisionerName) metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnBetaStorageProvisioner, provisionerName)
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnStorageProvisioner, provisionerName) metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnStorageProvisioner, provisionerName)
updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true) updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
if err != nil { if err != nil {
return newClaim, err return newClaim, err
} }
_, err = ctrl.storeClaimUpdate(newClaim) _, err = ctrl.storeClaimUpdate(logger, newClaim)
if err != nil { if err != nil {
return newClaim, err return newClaim, err
} }
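Once setClaimProvisioner has a real context it also feeds it to the client-go Update call in place of context.TODO(). A minimal hypothetical wrapper showing that substitution:

package example

import (
    "context"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// updateClaim is a hypothetical helper: the caller's ctx, not context.TODO(),
// flows into the API call so cancellation and tracing propagate.
func updateClaim(ctx context.Context, client kubernetes.Interface, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
    return client.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
}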
@ -678,7 +689,7 @@ func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
// callback (i.e. with events from etcd) or with an object modified by the // callback (i.e. with events from etcd) or with an object modified by the
// controller itself. Returns "true", if the cache was updated, false if the // controller itself. Returns "true", if the cache was updated, false if the
// object is an old version and should be ignored. // object is an old version and should be ignored.
func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) { func storeObjectUpdate(logger klog.Logger, store cache.Store, obj interface{}, className string) (bool, error) {
objName, err := controller.KeyFunc(obj) objName, err := controller.KeyFunc(obj)
if err != nil { if err != nil {
return false, fmt.Errorf("couldn't get key for object %+v: %w", obj, err) return false, fmt.Errorf("couldn't get key for object %+v: %w", obj, err)
@ -692,10 +703,9 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
if err != nil { if err != nil {
return false, err return false, err
} }
if !found { if !found {
// This is a new object // This is a new object
klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) logger.V(4).Info("storeObjectUpdate, adding obj", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
if err = store.Add(obj); err != nil { if err = store.Add(obj); err != nil {
return false, fmt.Errorf("error adding %s %q to controller cache: %w", className, objName, err) return false, fmt.Errorf("error adding %s %q to controller cache: %w", className, objName, err)
} }
@ -719,11 +729,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
// Throw away only older version, let the same version pass - we do want to // Throw away only older version, let the same version pass - we do want to
// get periodic sync events. // get periodic sync events.
if oldObjResourceVersion > objResourceVersion { if oldObjResourceVersion > objResourceVersion {
klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) logger.V(4).Info("storeObjectUpdate: ignoring obj", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
return false, nil return false, nil
} }
klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) logger.V(4).Info("storeObjectUpdate updating obj with version", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
if err = store.Update(obj); err != nil { if err = store.Update(obj); err != nil {
return false, fmt.Errorf("error updating %s %q in controller cache: %w", className, objName, err) return false, fmt.Errorf("error updating %s %q in controller cache: %w", className, objName, err)
} }

View File

@ -38,6 +38,7 @@ import (
"k8s.io/component-helpers/storage/volume" "k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
@ -310,7 +311,7 @@ func TestControllerSync(t *testing.T) {
}, },
}, },
} }
_, ctx := ktesting.NewTestContext(t)
doit := func(test controllerTest) { doit := func(test controllerTest) {
// Initialize the controller // Initialize the controller
client := &fake.Clientset{} client := &fake.Clientset{}
@ -324,7 +325,7 @@ func TestControllerSync(t *testing.T) {
client.PrependWatchReactor("pods", core.DefaultWatchReactor(watch.NewFake(), nil)) client.PrependWatchReactor("pods", core.DefaultWatchReactor(watch.NewFake(), nil))
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
ctrl, err := newTestController(client, informers, true) ctrl, err := newTestController(ctx, client, informers, true)
if err != nil { if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err) t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
} }
@ -341,7 +342,7 @@ func TestControllerSync(t *testing.T) {
} }
ctrl.classLister = storagelisters.NewStorageClassLister(indexer) ctrl.classLister = storagelisters.NewStorageClassLister(indexer)
reactor := newVolumeReactor(client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors) reactor := newVolumeReactor(ctx, client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors)
for _, claim := range test.initialClaims { for _, claim := range test.initialClaims {
claim = claim.DeepCopy() claim = claim.DeepCopy()
reactor.AddClaim(claim) reactor.AddClaim(claim)
@ -380,7 +381,7 @@ func TestControllerSync(t *testing.T) {
} }
// Simulate a periodic resync, just in case some events arrived in a // Simulate a periodic resync, just in case some events arrived in a
// wrong order. // wrong order.
ctrl.resync() ctrl.resync(ctx)
err = reactor.waitTest(test) err = reactor.waitTest(test)
if err != nil { if err != nil {
@ -388,7 +389,7 @@ func TestControllerSync(t *testing.T) {
} }
cancel() cancel()
evaluateTestResults(ctrl, reactor.VolumeReactor, test, t) evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
} }
for _, test := range tests { for _, test := range tests {
@ -402,7 +403,8 @@ func TestControllerSync(t *testing.T) {
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) { func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty) pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = version pv.ResourceVersion = version
ret, err := storeObjectUpdate(c, pv, "volume") logger, _ := ktesting.NewTestContext(t)
ret, err := storeObjectUpdate(logger, c, pv, "volume")
if err != nil { if err != nil {
t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err) t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
} }
@ -461,7 +463,8 @@ func TestControllerCacheParsingError(t *testing.T) {
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty) pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = "xxx" pv.ResourceVersion = "xxx"
_, err := storeObjectUpdate(c, pv, "volume") logger, _ := ktesting.NewTestContext(t)
_, err := storeObjectUpdate(logger, c, pv, "volume")
if err == nil { if err == nil {
t.Errorf("Expected parsing error, got nil instead") t.Errorf("Expected parsing error, got nil instead")
} }
@ -572,19 +575,19 @@ func TestAnnealMigrationAnnotations(t *testing.T) {
translator := csitrans.New() translator := csitrans.New()
cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate) cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate)
logger, _ := ktesting.NewTestContext(t)
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
if tc.volumeAnnotations != nil { if tc.volumeAnnotations != nil {
ann := tc.volumeAnnotations ann := tc.volumeAnnotations
updateMigrationAnnotations(cmpm, translator, ann, false) updateMigrationAnnotations(logger, cmpm, translator, ann, false)
if !reflect.DeepEqual(tc.expVolumeAnnotations, ann) { if !reflect.DeepEqual(tc.expVolumeAnnotations, ann) {
t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations) t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations)
} }
} }
if tc.claimAnnotations != nil { if tc.claimAnnotations != nil {
ann := tc.claimAnnotations ann := tc.claimAnnotations
updateMigrationAnnotations(cmpm, translator, ann, true) updateMigrationAnnotations(logger, cmpm, translator, ann, true)
if !reflect.DeepEqual(tc.expClaimAnnotations, ann) { if !reflect.DeepEqual(tc.expClaimAnnotations, ann) {
t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations) t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations)
} }
@ -732,13 +735,13 @@ func TestModifyDeletionFinalizers(t *testing.T) {
translator := csitrans.New() translator := csitrans.New()
cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate) cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate)
logger, _ := ktesting.NewTestContext(t)
for _, tc := range tests { for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
if tc.volumeAnnotations != nil { if tc.volumeAnnotations != nil {
tc.initialVolume.SetAnnotations(tc.volumeAnnotations) tc.initialVolume.SetAnnotations(tc.volumeAnnotations)
} }
modifiedFinalizers, modified := modifyDeletionFinalizers(cmpm, tc.initialVolume) modifiedFinalizers, modified := modifyDeletionFinalizers(logger, cmpm, tc.initialVolume)
if modified != tc.expModified { if modified != tc.expModified {
t.Errorf("got modified: %v, but expected: %v", modified, tc.expModified) t.Errorf("got modified: %v, but expected: %v", modified, tc.expModified)
} }
@ -881,7 +884,8 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) {
}, },
}, },
} }
_, ctx := ktesting.NewTestContext(t)
for _, test := range tests { for _, test := range tests {
runSyncTests(t, test.tests, test.storageClasses, nil) runSyncTests(t, ctx, test.tests, test.storageClasses, nil)
} }
} }
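
Aside (illustrative): the test changes rely on ktesting.NewTestContext, which returns a per-test logger plus a context carrying that logger, so either can be handed to the code under test. A hypothetical test showing the pattern:

package example

import (
	"testing"

	"k8s.io/klog/v2/ktesting"
)

// TestContextualLogging shows the helper pattern used in the tests above:
// logger for direct calls, ctx for functions that use klog.FromContext(ctx).
func TestContextualLogging(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	logger.V(4).Info("running hypothetical step")
	_ = ctx // would be passed along, e.g. ctrl.resync(ctx) or runSyncTests(t, ctx, ...)
}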

View File

@ -24,6 +24,7 @@ import (
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/component-helpers/storage/volume" "k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2/ktesting"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
) )
@ -32,6 +33,7 @@ import (
// 2. Call the syncVolume *once*. // 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes. // 3. Compare resulting volumes with expected volumes.
func TestRecycleSync(t *testing.T) { func TestRecycleSync(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
runningPod := &v1.Pod{ runningPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "runningPod", Name: "runningPod",
@ -139,7 +141,7 @@ func TestRecycleSync(t *testing.T) {
expectedClaims: noclaims, expectedClaims: noclaims,
expectedEvents: noevents, expectedEvents: noevents,
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Delete the volume before recycle operation starts // Delete the volume before recycle operation starts
reactor.DeleteVolume("volume6-6") reactor.DeleteVolume("volume6-6")
}), }),
@ -155,7 +157,7 @@ func TestRecycleSync(t *testing.T) {
expectedClaims: noclaims, expectedClaims: noclaims,
expectedEvents: noevents, expectedEvents: noevents,
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Mark the volume as Available before the recycler starts // Mark the volume as Available before the recycler starts
reactor.MarkVolumeAvailable("volume6-7") reactor.MarkVolumeAvailable("volume6-7")
}), }),
@ -172,7 +174,7 @@ func TestRecycleSync(t *testing.T) {
expectedClaims: noclaims, expectedClaims: noclaims,
expectedEvents: noevents, expectedEvents: noevents,
errors: noerrors, errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Mark the volume as Available before the recycler starts // Mark the volume as Available before the recycler starts
reactor.MarkVolumeAvailable("volume6-8") reactor.MarkVolumeAvailable("volume6-8")
}), }),
@ -249,7 +251,7 @@ func TestRecycleSync(t *testing.T) {
test: wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), test: wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
}, },
} }
runSyncTests(t, tests, []*storage.StorageClass{}, pods) runSyncTests(t, ctx, tests, []*storage.StorageClass{}, pods)
} }
// Test multiple calls to syncClaim/syncVolume and periodic sync of all // Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -268,6 +270,7 @@ func TestRecycleSync(t *testing.T) {
// //
// Some limit of calls is enforced to prevent endless loops. // Some limit of calls is enforced to prevent endless loops.
func TestRecycleMultiSync(t *testing.T) { func TestRecycleMultiSync(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{ tests := []controllerTest{
{ {
// recycle failure - recycle returns error. The controller should // recycle failure - recycle returns error. The controller should
@ -282,5 +285,5 @@ func TestRecycleMultiSync(t *testing.T) {
}, },
} }
runMultisyncTests(t, tests, []*storage.StorageClass{}, "") runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "")
} }

View File

@ -17,8 +17,10 @@ limitations under the License.
package testing package testing
import ( import (
"context"
"errors" "errors"
"fmt" "fmt"
"k8s.io/klog/v2"
"reflect" "reflect"
"strconv" "strconv"
"sync" "sync"
@ -32,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/watch" "k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
) )
// ErrVersionConflict is the error returned when resource version of requested // ErrVersionConflict is the error returned when resource version of requested
@ -87,14 +88,14 @@ type ReactorError struct {
// to evaluate test results. // to evaluate test results.
// All updated objects are also inserted into changedObjects queue and // All updated objects are also inserted into changedObjects queue and
// optionally sent back to the controller via its watchers. // optionally sent back to the controller via its watchers.
func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Object, err error) { func (r *VolumeReactor) React(ctx context.Context, action core.Action) (handled bool, ret runtime.Object, err error) {
r.lock.Lock() r.lock.Lock()
defer r.lock.Unlock() defer r.lock.Unlock()
logger := klog.FromContext(ctx)
klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource()) logger.V(4).Info("Reactor got operation", "resource", action.GetResource(), "verb", action.GetVerb())
// Inject error when requested // Inject error when requested
err = r.injectReactError(action) err = r.injectReactError(ctx, action)
if err != nil { if err != nil {
return true, nil, err return true, nil, err
} }
@ -124,7 +125,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
} }
r.changedObjects = append(r.changedObjects, volume) r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++ r.changedSinceLastSync++
klog.V(4).Infof("created volume %s", volume.Name) logger.V(4).Info("Created volume", "volumeName", volume.Name)
return true, volume, nil return true, volume, nil
case action.Matches("create", "persistentvolumeclaims"): case action.Matches("create", "persistentvolumeclaims"):
@ -144,7 +145,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
} }
r.changedObjects = append(r.changedObjects, claim) r.changedObjects = append(r.changedObjects, claim)
r.changedSinceLastSync++ r.changedSinceLastSync++
klog.V(4).Infof("created claim %s", claim.Name) logger.V(4).Info("Created claim", "PVC", klog.KObj(claim))
return true, claim, nil return true, claim, nil
case action.Matches("update", "persistentvolumes"): case action.Matches("update", "persistentvolumes"):
@ -160,7 +161,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, obj, ErrVersionConflict return true, obj, ErrVersionConflict
} }
if reflect.DeepEqual(storedVolume, volume) { if reflect.DeepEqual(storedVolume, volume) {
klog.V(4).Infof("nothing updated volume %s", volume.Name) logger.V(4).Info("Nothing updated volume", "volumeName", volume.Name)
return true, volume, nil return true, volume, nil
} }
// Don't modify the existing object // Don't modify the existing object
@ -177,7 +178,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.volumes[volume.Name] = volume r.volumes[volume.Name] = volume
r.changedObjects = append(r.changedObjects, volume) r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++ r.changedSinceLastSync++
klog.V(4).Infof("saved updated volume %s", volume.Name) logger.V(4).Info("Saved updated volume", "volumeName", volume.Name)
return true, volume, nil return true, volume, nil
case action.Matches("update", "persistentvolumeclaims"): case action.Matches("update", "persistentvolumeclaims"):
@ -193,7 +194,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, obj, ErrVersionConflict return true, obj, ErrVersionConflict
} }
if reflect.DeepEqual(storedClaim, claim) { if reflect.DeepEqual(storedClaim, claim) {
klog.V(4).Infof("nothing updated claim %s", claim.Name) logger.V(4).Info("Nothing updated claim", "PVC", klog.KObj(claim))
return true, claim, nil return true, claim, nil
} }
// Don't modify the existing object // Don't modify the existing object
@ -210,32 +211,33 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.claims[claim.Name] = claim r.claims[claim.Name] = claim
r.changedObjects = append(r.changedObjects, claim) r.changedObjects = append(r.changedObjects, claim)
r.changedSinceLastSync++ r.changedSinceLastSync++
klog.V(4).Infof("saved updated claim %s", claim.Name) logger.V(4).Info("Saved updated claim", "PVC", klog.KObj(claim))
return true, claim, nil return true, claim, nil
case action.Matches("get", "persistentvolumes"): case action.Matches("get", "persistentvolumes"):
name := action.(core.GetAction).GetName() name := action.(core.GetAction).GetName()
volume, found := r.volumes[name] volume, found := r.volumes[name]
if found { if found {
klog.V(4).Infof("GetVolume: found %s", volume.Name) logger.V(4).Info("GetVolume: found volume", "volumeName", volume.Name)
return true, volume.DeepCopy(), nil return true, volume.DeepCopy(), nil
} }
klog.V(4).Infof("GetVolume: volume %s not found", name) logger.V(4).Info("GetVolume: volume not found", "volumeName", name)
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name) return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
case action.Matches("get", "persistentvolumeclaims"): case action.Matches("get", "persistentvolumeclaims"):
name := action.(core.GetAction).GetName() name := action.(core.GetAction).GetName()
nameSpace := action.(core.GetAction).GetNamespace()
claim, found := r.claims[name] claim, found := r.claims[name]
if found { if found {
klog.V(4).Infof("GetClaim: found %s", claim.Name) logger.V(4).Info("GetClaim: found claim", "PVC", klog.KObj(claim))
return true, claim.DeepCopy(), nil return true, claim.DeepCopy(), nil
} }
klog.V(4).Infof("GetClaim: claim %s not found", name) logger.V(4).Info("GetClaim: claim not found", "PVC", klog.KRef(nameSpace, name))
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name) return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
case action.Matches("delete", "persistentvolumes"): case action.Matches("delete", "persistentvolumes"):
name := action.(core.DeleteAction).GetName() name := action.(core.DeleteAction).GetName()
klog.V(4).Infof("deleted volume %s", name) logger.V(4).Info("Deleted volume", "volumeName", name)
obj, found := r.volumes[name] obj, found := r.volumes[name]
if found { if found {
delete(r.volumes, name) delete(r.volumes, name)
@ -249,7 +251,8 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
case action.Matches("delete", "persistentvolumeclaims"): case action.Matches("delete", "persistentvolumeclaims"):
name := action.(core.DeleteAction).GetName() name := action.(core.DeleteAction).GetName()
klog.V(4).Infof("deleted claim %s", name) nameSpace := action.(core.DeleteAction).GetNamespace()
logger.V(4).Info("Deleted claim", "PVC", klog.KRef(nameSpace, name))
obj, found := r.claims[name] obj, found := r.claims[name]
if found { if found {
delete(r.claims, name) delete(r.claims, name)
@ -297,18 +300,18 @@ func (r *VolumeReactor) getWatches(gvr schema.GroupVersionResource, ns string) [
// injectReactError returns an error when the test requested the given action to // injectReactError returns an error when the test requested the given action to
// fail. nil is returned otherwise. // fail. nil is returned otherwise.
func (r *VolumeReactor) injectReactError(action core.Action) error { func (r *VolumeReactor) injectReactError(ctx context.Context, action core.Action) error {
if len(r.errors) == 0 { if len(r.errors) == 0 {
// No more errors to inject, everything should succeed. // No more errors to inject, everything should succeed.
return nil return nil
} }
logger := klog.FromContext(ctx)
for i, expected := range r.errors { for i, expected := range r.errors {
klog.V(4).Infof("trying to match %q %q with %q %q", expected.Verb, expected.Resource, action.GetVerb(), action.GetResource()) logger.V(4).Info("Trying to match resource verb", "resource", action.GetResource(), "verb", action.GetVerb(), "expectedResource", expected.Resource, "expectedVerb", expected.Verb)
if action.Matches(expected.Verb, expected.Resource) { if action.Matches(expected.Verb, expected.Resource) {
// That's the action we're waiting for, remove it from injectedErrors // That's the action we're waiting for, remove it from injectedErrors
r.errors = append(r.errors[:i], r.errors[i+1:]...) r.errors = append(r.errors[:i], r.errors[i+1:]...)
klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.Verb, expected.Resource, expected.Error) logger.V(4).Info("Reactor found matching error", "index", i, "expectedResource", expected.Resource, "expectedVerb", expected.Verb, "err", expected.Error)
return expected.Error return expected.Error
} }
} }
@ -382,7 +385,7 @@ func (r *VolumeReactor) CheckClaims(expectedClaims []*v1.PersistentVolumeClaim)
// PopChange returns one recorded updated object, either *v1.PersistentVolume // PopChange returns one recorded updated object, either *v1.PersistentVolume
// or *v1.PersistentVolumeClaim. Returns nil when there are no changes. // or *v1.PersistentVolumeClaim. Returns nil when there are no changes.
func (r *VolumeReactor) PopChange() interface{} { func (r *VolumeReactor) PopChange(ctx context.Context) interface{} {
r.lock.Lock() r.lock.Lock()
defer r.lock.Unlock() defer r.lock.Unlock()
@ -391,14 +394,15 @@ func (r *VolumeReactor) PopChange() interface{} {
} }
// For debugging purposes, print the queue // For debugging purposes, print the queue
logger := klog.FromContext(ctx)
for _, obj := range r.changedObjects { for _, obj := range r.changedObjects {
switch obj.(type) { switch obj.(type) {
case *v1.PersistentVolume: case *v1.PersistentVolume:
vol, _ := obj.(*v1.PersistentVolume) vol, _ := obj.(*v1.PersistentVolume)
klog.V(4).Infof("reactor queue: %s", vol.Name) logger.V(4).Info("Reactor queue", "volumeName", vol.Name)
case *v1.PersistentVolumeClaim: case *v1.PersistentVolumeClaim:
claim, _ := obj.(*v1.PersistentVolumeClaim) claim, _ := obj.(*v1.PersistentVolumeClaim)
klog.V(4).Infof("reactor queue: %s", claim.Name) logger.V(4).Info("Reactor queue", "PVC", klog.KObj(claim))
} }
} }
@ -539,7 +543,7 @@ func (r *VolumeReactor) MarkVolumeAvailable(name string) {
} }
// NewVolumeReactor creates a volume reactor. // NewVolumeReactor creates a volume reactor.
func NewVolumeReactor(client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []ReactorError) *VolumeReactor { func NewVolumeReactor(ctx context.Context, client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []ReactorError) *VolumeReactor {
reactor := &VolumeReactor{ reactor := &VolumeReactor{
volumes: make(map[string]*v1.PersistentVolume), volumes: make(map[string]*v1.PersistentVolume),
claims: make(map[string]*v1.PersistentVolumeClaim), claims: make(map[string]*v1.PersistentVolumeClaim),
@ -548,13 +552,30 @@ func NewVolumeReactor(client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *w
errors: errors, errors: errors,
watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher),
} }
client.AddReactor("create", "persistentvolumes", reactor.React) client.AddReactor("create", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
client.AddReactor("create", "persistentvolumeclaims", reactor.React) return reactor.React(ctx, action)
client.AddReactor("update", "persistentvolumes", reactor.React) })
client.AddReactor("update", "persistentvolumeclaims", reactor.React)
client.AddReactor("get", "persistentvolumes", reactor.React) client.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
client.AddReactor("get", "persistentvolumeclaims", reactor.React) return reactor.React(ctx, action)
client.AddReactor("delete", "persistentvolumes", reactor.React) })
client.AddReactor("delete", "persistentvolumeclaims", reactor.React) client.AddReactor("update", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("get", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("delete", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("delete", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
return reactor return reactor
} }
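
Aside (illustrative): fake.Clientset reactors accept only the action, so the commit forwards the test context through closures. A stripped-down version of that wiring, with a hypothetical context-aware handler, looks like this:

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// registerReactor captures ctx in a closure and hands it to a context-aware
// reaction function, since AddReactor's callback has no context parameter.
func registerReactor(ctx context.Context, client *fake.Clientset,
	react func(context.Context, core.Action) (bool, runtime.Object, error)) {
	client.AddReactor("get", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
		return react(ctx, action)
	})
}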

View File

@ -123,7 +123,7 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s
func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) { func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) { return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController") klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in PersistentVolumeController")
} }
} }
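
Aside (illustrative): for messages with no wrapped error, klog.ErrorS accepts nil as its first argument, which is the form the hunk above switches to.

package example

import "k8s.io/klog/v2"

// reportUnsupported logs a structured error message without an underlying error value.
func reportUnsupported() {
	klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in PersistentVolumeController")
}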

View File

@ -55,7 +55,7 @@ type Controller struct {
} }
// NewPVCProtectionController returns a new instance of PVCProtectionController. // NewPVCProtectionController returns a new instance of PVCProtectionController.
func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) { func NewPVCProtectionController(logger klog.Logger, pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) {
e := &Controller{ e := &Controller{
client: cl, client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
@ -64,9 +64,11 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
e.pvcLister = pvcInformer.Lister() e.pvcLister = pvcInformer.Lister()
e.pvcListerSynced = pvcInformer.Informer().HasSynced e.pvcListerSynced = pvcInformer.Informer().HasSynced
pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.pvcAddedUpdated, AddFunc: func(obj interface{}) {
e.pvcAddedUpdated(logger, obj)
},
UpdateFunc: func(old, new interface{}) { UpdateFunc: func(old, new interface{}) {
e.pvcAddedUpdated(new) e.pvcAddedUpdated(logger, new)
}, },
}) })
@ -78,13 +80,13 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
} }
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
e.podAddedDeletedUpdated(nil, obj, false) e.podAddedDeletedUpdated(logger, nil, obj, false)
}, },
DeleteFunc: func(obj interface{}) { DeleteFunc: func(obj interface{}) {
e.podAddedDeletedUpdated(nil, obj, true) e.podAddedDeletedUpdated(logger, nil, obj, true)
}, },
UpdateFunc: func(old, new interface{}) { UpdateFunc: func(old, new interface{}) {
e.podAddedDeletedUpdated(old, new, false) e.podAddedDeletedUpdated(logger, old, new, false)
}, },
}) })
@ -96,8 +98,9 @@ func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash() defer utilruntime.HandleCrash()
defer c.queue.ShutDown() defer c.queue.ShutDown()
klog.InfoS("Starting PVC protection controller") logger := klog.FromContext(ctx)
defer klog.InfoS("Shutting down PVC protection controller") logger.Info("Starting PVC protection controller")
defer logger.Info("Shutting down PVC protection controller")
if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) { if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) {
return return
@ -142,15 +145,16 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
} }
func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName string) error { func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName string) error {
klog.V(4).InfoS("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName)) logger := klog.FromContext(ctx)
logger.V(4).Info("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName))
startTime := time.Now() startTime := time.Now()
defer func() { defer func() {
klog.V(4).InfoS("Finished processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName), "duration", time.Since(startTime)) logger.V(4).Info("Finished processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName), "duration", time.Since(startTime))
}() }()
pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName) pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
klog.V(4).InfoS("PVC not found, ignoring", "PVC", klog.KRef(pvcNamespace, pvcName)) logger.V(4).Info("PVC not found, ignoring", "PVC", klog.KRef(pvcNamespace, pvcName))
return nil return nil
} }
if err != nil { if err != nil {
@ -167,7 +171,7 @@ func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName strin
if !isUsed { if !isUsed {
return c.removeFinalizer(ctx, pvc) return c.removeFinalizer(ctx, pvc)
} }
klog.V(2).InfoS("Keeping PVC because it is being used", "PVC", klog.KObj(pvc)) logger.V(2).Info("Keeping PVC because it is being used", "PVC", klog.KObj(pvc))
} }
if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) { if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) {
@ -184,11 +188,12 @@ func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeC
claimClone := pvc.DeepCopy() claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil { if err != nil {
klog.ErrorS(err, "Error adding protection finalizer to PVC", "PVC", klog.KObj(pvc)) logger.Error(err, "Error adding protection finalizer to PVC", "PVC", klog.KObj(pvc))
return err return err
} }
klog.V(3).InfoS("Added protection finalizer to PVC", "PVC", klog.KObj(pvc)) logger.V(3).Info("Added protection finalizer to PVC", "PVC", klog.KObj(pvc))
return nil return nil
} }
@ -196,11 +201,12 @@ func (c *Controller) removeFinalizer(ctx context.Context, pvc *v1.PersistentVolu
claimClone := pvc.DeepCopy() claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil { if err != nil {
klog.ErrorS(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc)) logger.Error(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc))
return err return err
} }
klog.V(3).InfoS("Removed protection finalizer from PVC", "PVC", klog.KObj(pvc)) logger.V(3).Info("Removed protection finalizer from PVC", "PVC", klog.KObj(pvc))
return nil return nil
} }
@ -208,9 +214,10 @@ func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeCl
// Look for a Pod using pvc in the Informer's cache. If one is found the // Look for a Pod using pvc in the Informer's cache. If one is found the
// correct decision to keep pvc is taken without doing an expensive live // correct decision to keep pvc is taken without doing an expensive live
// list. // list.
if inUse, err := c.askInformer(pvc); err != nil { logger := klog.FromContext(ctx)
if inUse, err := c.askInformer(logger, pvc); err != nil {
// No need to return because a live list will follow. // No need to return because a live list will follow.
klog.Error(err) logger.Error(err, "")
} else if inUse { } else if inUse {
return true, nil return true, nil
} }
@ -222,8 +229,8 @@ func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeCl
return c.askAPIServer(ctx, pvc) return c.askAPIServer(ctx, pvc)
} }
func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) { func (c *Controller) askInformer(logger klog.Logger, pvc *v1.PersistentVolumeClaim) (bool, error) {
klog.V(4).InfoS("Looking for Pods using PVC in the Informer's cache", "PVC", klog.KObj(pvc)) logger.V(4).Info("Looking for Pods using PVC in the Informer's cache", "PVC", klog.KObj(pvc))
// The indexer is used to find pods which might use the PVC. // The indexer is used to find pods which might use the PVC.
objs, err := c.podIndexer.ByIndex(common.PodPVCIndex, fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name)) objs, err := c.podIndexer.ByIndex(common.PodPVCIndex, fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name))
@ -239,17 +246,18 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
// We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim, // We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim,
// but for volume.Ephemeral we need to be sure that this particular PVC is the one // but for volume.Ephemeral we need to be sure that this particular PVC is the one
// created for the ephemeral volume. // created for the ephemeral volume.
if c.podUsesPVC(pod, pvc) { if c.podUsesPVC(logger, pod, pvc) {
return true, nil return true, nil
} }
} }
klog.V(4).InfoS("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc)) logger.V(4).Info("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc))
return false, nil return false, nil
} }
func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeClaim) (bool, error) { func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeClaim) (bool, error) {
klog.V(4).InfoS("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc)) logger := klog.FromContext(ctx)
logger.V(4).Info("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc))
podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(ctx, metav1.ListOptions{}) podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
@ -257,16 +265,16 @@ func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeC
} }
for _, pod := range podsList.Items { for _, pod := range podsList.Items {
if c.podUsesPVC(&pod, pvc) { if c.podUsesPVC(logger, &pod, pvc) {
return true, nil return true, nil
} }
} }
klog.V(2).InfoS("PVC is unused", "PVC", klog.KObj(pvc)) logger.V(2).Info("PVC is unused", "PVC", klog.KObj(pvc))
return false, nil return false, nil
} }
func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool { func (c *Controller) podUsesPVC(logger klog.Logger, pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool {
// Check whether pvc is used by pod only if pod is scheduled, because // Check whether pvc is used by pod only if pod is scheduled, because
// kubelet sees pods after they have been scheduled and it won't allow // kubelet sees pods after they have been scheduled and it won't allow
// starting a pod referencing a PVC with a non-nil deletionTimestamp. // starting a pod referencing a PVC with a non-nil deletionTimestamp.
@ -274,7 +282,7 @@ func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool
for _, volume := range pod.Spec.Volumes { for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc.Name || if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc.Name ||
!podIsShutDown(pod) && volume.Ephemeral != nil && ephemeral.VolumeClaimName(pod, &volume) == pvc.Name && ephemeral.VolumeIsForPod(pod, pvc) == nil { !podIsShutDown(pod) && volume.Ephemeral != nil && ephemeral.VolumeClaimName(pod, &volume) == pvc.Name && ephemeral.VolumeIsForPod(pod, pvc) == nil {
klog.V(2).InfoS("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc)) logger.V(2).Info("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
return true return true
} }
} }
@ -313,7 +321,7 @@ func podIsShutDown(pod *v1.Pod) bool {
} }
// pvcAddedUpdated reacts to pvc added/updated events // pvcAddedUpdated reacts to pvc added/updated events
func (c *Controller) pvcAddedUpdated(obj interface{}) { func (c *Controller) pvcAddedUpdated(logger klog.Logger, obj interface{}) {
pvc, ok := obj.(*v1.PersistentVolumeClaim) pvc, ok := obj.(*v1.PersistentVolumeClaim)
if !ok { if !ok {
utilruntime.HandleError(fmt.Errorf("PVC informer returned non-PVC object: %#v", obj)) utilruntime.HandleError(fmt.Errorf("PVC informer returned non-PVC object: %#v", obj))
@ -324,7 +332,7 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("couldn't get key for Persistent Volume Claim %#v: %v", pvc, err)) utilruntime.HandleError(fmt.Errorf("couldn't get key for Persistent Volume Claim %#v: %v", pvc, err))
return return
} }
klog.V(4).InfoS("Got event on PVC", "pvc", klog.KObj(pvc)) logger.V(4).Info("Got event on PVC", "pvc", klog.KObj(pvc))
if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) || protectionutil.IsDeletionCandidate(pvc, volumeutil.PVCProtectionFinalizer) { if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) || protectionutil.IsDeletionCandidate(pvc, volumeutil.PVCProtectionFinalizer) {
c.queue.Add(key) c.queue.Add(key)
@ -332,9 +340,9 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) {
} }
// podAddedDeletedUpdated reacts to Pod events // podAddedDeletedUpdated reacts to Pod events
func (c *Controller) podAddedDeletedUpdated(old, new interface{}, deleted bool) { func (c *Controller) podAddedDeletedUpdated(logger klog.Logger, old, new interface{}, deleted bool) {
if pod := c.parsePod(new); pod != nil { if pod := c.parsePod(new); pod != nil {
c.enqueuePVCs(pod, deleted) c.enqueuePVCs(logger, pod, deleted)
// An update notification might mask the deletion of a pod X and the // An update notification might mask the deletion of a pod X and the
// following creation of a pod Y with the same namespaced name as X. If // following creation of a pod Y with the same namespaced name as X. If
@ -342,7 +350,7 @@ func (c *Controller) podAddedDeletedUpdated(old, new interface{}, deleted bool)
// where it is blocking deletion of a PVC not referenced by Y, otherwise // where it is blocking deletion of a PVC not referenced by Y, otherwise
// such PVC will never be deleted. // such PVC will never be deleted.
if oldPod := c.parsePod(old); oldPod != nil && oldPod.UID != pod.UID { if oldPod := c.parsePod(old); oldPod != nil && oldPod.UID != pod.UID {
c.enqueuePVCs(oldPod, true) c.enqueuePVCs(logger, oldPod, true)
} }
} }
} }
@ -367,13 +375,13 @@ func (*Controller) parsePod(obj interface{}) *v1.Pod {
return pod return pod
} }
func (c *Controller) enqueuePVCs(pod *v1.Pod, deleted bool) { func (c *Controller) enqueuePVCs(logger klog.Logger, pod *v1.Pod, deleted bool) {
// Filter out pods that can't help us to remove a finalizer on PVC // Filter out pods that can't help us to remove a finalizer on PVC
if !deleted && !volumeutil.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" { if !deleted && !volumeutil.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" {
return return
} }
klog.V(4).InfoS("Enqueuing PVCs for Pod", "pod", klog.KObj(pod), "podUID", pod.UID) logger.V(4).Info("Enqueuing PVCs for Pod", "pod", klog.KObj(pod), "podUID", pod.UID)
// Enqueue all PVCs that the pod uses // Enqueue all PVCs that the pod uses
for _, volume := range pod.Spec.Volumes { for _, volume := range pod.Spec.Volumes {

View File

@ -36,6 +36,7 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing" clienttesting "k8s.io/client-go/testing"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
volumeutil "k8s.io/kubernetes/pkg/volume/util" volumeutil "k8s.io/kubernetes/pkg/volume/util"
) )
@ -399,7 +400,8 @@ func TestPVCProtectionController(t *testing.T) {
podInformer := informers.Core().V1().Pods() podInformer := informers.Core().V1().Pods()
// Create the controller // Create the controller
ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client) logger, _ := ktesting.NewTestContext(t)
ctrl, err := NewPVCProtectionController(logger, pvcInformer, podInformer, client)
if err != nil { if err != nil {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
@ -424,15 +426,15 @@ func TestPVCProtectionController(t *testing.T) {
// Start the test by simulating an event // Start the test by simulating an event
if test.updatedPVC != nil { if test.updatedPVC != nil {
ctrl.pvcAddedUpdated(test.updatedPVC) ctrl.pvcAddedUpdated(logger, test.updatedPVC)
} }
switch { switch {
case test.deletedPod != nil && test.updatedPod != nil && test.deletedPod.Namespace == test.updatedPod.Namespace && test.deletedPod.Name == test.updatedPod.Name: case test.deletedPod != nil && test.updatedPod != nil && test.deletedPod.Namespace == test.updatedPod.Namespace && test.deletedPod.Name == test.updatedPod.Name:
ctrl.podAddedDeletedUpdated(test.deletedPod, test.updatedPod, false) ctrl.podAddedDeletedUpdated(logger, test.deletedPod, test.updatedPod, false)
case test.updatedPod != nil: case test.updatedPod != nil:
ctrl.podAddedDeletedUpdated(nil, test.updatedPod, false) ctrl.podAddedDeletedUpdated(logger, nil, test.updatedPod, false)
case test.deletedPod != nil: case test.deletedPod != nil:
ctrl.podAddedDeletedUpdated(nil, test.deletedPod, true) ctrl.podAddedDeletedUpdated(logger, nil, test.deletedPod, true)
} }
// Process the controller queue until we get expected results // Process the controller queue until we get expected results

View File

@ -49,7 +49,7 @@ type Controller struct {
} }
// NewPVProtectionController returns a new *Controller. // NewPVProtectionController returns a new *Controller.
func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller { func NewPVProtectionController(logger klog.Logger, pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
e := &Controller{ e := &Controller{
client: cl, client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
@ -58,9 +58,11 @@ func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer
e.pvLister = pvInformer.Lister() e.pvLister = pvInformer.Lister()
e.pvListerSynced = pvInformer.Informer().HasSynced e.pvListerSynced = pvInformer.Informer().HasSynced
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.pvAddedUpdated, AddFunc: func(obj interface{}) {
e.pvAddedUpdated(logger, obj)
},
UpdateFunc: func(old, new interface{}) { UpdateFunc: func(old, new interface{}) {
e.pvAddedUpdated(new) e.pvAddedUpdated(logger, new)
}, },
}) })
@ -72,8 +74,9 @@ func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash() defer utilruntime.HandleCrash()
defer c.queue.ShutDown() defer c.queue.ShutDown()
klog.Infof("Starting PV protection controller") logger := klog.FromContext(ctx)
defer klog.Infof("Shutting down PV protection controller") logger.Info("Starting PV protection controller")
defer logger.Info("Shutting down PV protection controller")
if !cache.WaitForNamedCacheSync("PV protection", ctx.Done(), c.pvListerSynced) { if !cache.WaitForNamedCacheSync("PV protection", ctx.Done(), c.pvListerSynced) {
return return
@ -114,15 +117,16 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
} }
func (c *Controller) processPV(ctx context.Context, pvName string) error { func (c *Controller) processPV(ctx context.Context, pvName string) error {
klog.V(4).Infof("Processing PV %s", pvName) logger := klog.FromContext(ctx)
logger.V(4).Info("Processing PV", "PV", klog.KRef("", pvName))
startTime := time.Now() startTime := time.Now()
defer func() { defer func() {
klog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime)) logger.V(4).Info("Finished processing PV", "PV", klog.KRef("", pvName), "cost", time.Since(startTime))
}() }()
pv, err := c.pvLister.Get(pvName) pv, err := c.pvLister.Get(pvName)
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
klog.V(4).Infof("PV %s not found, ignoring", pvName) logger.V(4).Info("PV not found, ignoring", "PV", klog.KRef("", pvName))
return nil return nil
} }
if err != nil { if err != nil {
@ -136,7 +140,7 @@ func (c *Controller) processPV(ctx context.Context, pvName string) error {
if !isUsed { if !isUsed {
return c.removeFinalizer(ctx, pv) return c.removeFinalizer(ctx, pv)
} }
klog.V(4).Infof("Keeping PV %s because it is being used", pvName) logger.V(4).Info("Keeping PV because it is being used", "PV", klog.KRef("", pvName))
} }
if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) { if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) {
@ -153,11 +157,12 @@ func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume)
pvClone := pv.DeepCopy() pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer) pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{}) _, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil { if err != nil {
klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err) logger.V(3).Info("Error adding protection finalizer to PV", "PV", klog.KObj(pv), "err", err)
return err return err
} }
klog.V(3).Infof("Added protection finalizer to PV %s", pv.Name) logger.V(3).Info("Added protection finalizer to PV", "PV", klog.KObj(pv))
return nil return nil
} }
@ -165,11 +170,12 @@ func (c *Controller) removeFinalizer(ctx context.Context, pv *v1.PersistentVolum
pvClone := pv.DeepCopy() pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil) pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{}) _, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil { if err != nil {
klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err) logger.V(3).Info("Error removing protection finalizer from PV", "PV", klog.KObj(pv), "err", err)
return err return err
} }
klog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name) logger.V(3).Info("Removed protection finalizer from PV", "PV", klog.KObj(pv))
return nil return nil
} }
@ -185,13 +191,13 @@ func (c *Controller) isBeingUsed(pv *v1.PersistentVolume) bool {
} }
// pvAddedUpdated reacts to pv added/updated events // pvAddedUpdated reacts to pv added/updated events
func (c *Controller) pvAddedUpdated(obj interface{}) { func (c *Controller) pvAddedUpdated(logger klog.Logger, obj interface{}) {
pv, ok := obj.(*v1.PersistentVolume) pv, ok := obj.(*v1.PersistentVolume)
if !ok { if !ok {
utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj)) utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj))
return return
} }
klog.V(4).Infof("Got event on PV %s", pv.Name) logger.V(4).Info("Got event on PV", "PV", klog.KObj(pv))
if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) || protectionutil.IsDeletionCandidate(pv, volumeutil.PVProtectionFinalizer) { if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) || protectionutil.IsDeletionCandidate(pv, volumeutil.PVProtectionFinalizer) {
c.queue.Add(pv.Name) c.queue.Add(pv.Name)
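
Aside (illustrative): the KObj and KRef helpers used throughout these hunks format object references consistently; KObj takes an object you already hold, KRef takes a namespace/name pair (the namespace is empty for cluster-scoped objects such as PVs).

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

// logPVRefs demonstrates both helpers against a throwaway PV object.
func logPVRefs(logger klog.Logger, pvName string) {
	pv := &v1.PersistentVolume{ObjectMeta: metav1.ObjectMeta{Name: pvName}}
	logger.V(4).Info("Processing PV", "PV", klog.KObj(pv))                  // from the object
	logger.V(4).Info("Processing PV by name", "PV", klog.KRef("", pvName)) // from strings only
}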

View File

@ -35,6 +35,7 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing" clienttesting "k8s.io/client-go/testing"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
volumeutil "k8s.io/kubernetes/pkg/volume/util" volumeutil "k8s.io/kubernetes/pkg/volume/util"
) )
@ -210,11 +211,12 @@ func TestPVProtectionController(t *testing.T) {
} }
// Create the controller // Create the controller
ctrl := NewPVProtectionController(pvInformer, client) logger, _ := ktesting.NewTestContext(t)
ctrl := NewPVProtectionController(logger, pvInformer, client)
// Start the test by simulating an event // Start the test by simulating an event
if test.updatedPV != nil { if test.updatedPV != nil {
ctrl.pvAddedUpdated(test.updatedPV) ctrl.pvAddedUpdated(logger, test.updatedPV)
} }
// Process the controller queue until we get expected results // Process the controller queue until we get expected results

View File

@ -359,12 +359,13 @@ type mountedPod struct {
} }
func (asw *actualStateOfWorld) MarkVolumeAsAttached( func (asw *actualStateOfWorld) MarkVolumeAsAttached(
logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error { volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
return asw.addVolume(volumeName, volumeSpec, devicePath) return asw.addVolume(volumeName, volumeSpec, devicePath)
} }
func (asw *actualStateOfWorld) MarkVolumeAsUncertain( func (asw *actualStateOfWorld) MarkVolumeAsUncertain(
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error { logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error {
return nil return nil
} }
@ -473,7 +474,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(markVolumeOpts operationexecu
return asw.AddPodToVolume(markVolumeOpts) return asw.AddPodToVolume(markVolumeOpts)
} }
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) { func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// no operation for kubelet side // no operation for kubelet side
} }
@ -770,7 +771,7 @@ func (asw *actualStateOfWorld) SetDeviceMountState(
return nil return nil
} }
func (asw *actualStateOfWorld) InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) { func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()

View File

@ -28,6 +28,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@ -71,7 +72,8 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
} }
// Act // Act
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert // Assert
if err != nil { if err != nil {
@ -115,7 +117,8 @@ func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing.
volumeName := v1.UniqueVolumeName("this-would-never-be-a-volume-name") volumeName := v1.UniqueVolumeName("this-would-never-be-a-volume-name")
// Act // Act
err := asw.MarkVolumeAsAttached(volumeName, volumeSpec, "" /* nodeName */, devicePath) logger, _ := ktesting.NewTestContext(t)
err := asw.MarkVolumeAsAttached(logger, volumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert // Assert
if err != nil { if err != nil {
@ -159,14 +162,14 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
// Act // Act
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert // Assert
if err != nil { if err != nil {
@ -210,8 +213,8 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -286,8 +289,8 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -394,8 +397,8 @@ func Test_AddTwoPodsToVolume_Positive(t *testing.T) {
generatedVolumeName1, generatedVolumeName1,
generatedVolumeName2, volumeSpec1, volumeSpec2) generatedVolumeName2, volumeSpec1, volumeSpec2)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -534,8 +537,8 @@ func TestActualStateOfWorld_FoundDuringReconstruction(t *testing.T) {
generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec( generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec1) plugin, volumeSpec1)
require.NoError(t, err) require.NoError(t, err)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -611,8 +614,9 @@ func Test_MarkVolumeAsDetached_Negative_PodInVolume(t *testing.T) {
}, },
}, },
} }
logger, _ := ktesting.NewTestContext(t)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err := asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -801,8 +805,8 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -854,8 +858,8 @@ func Test_AddPodToVolume_Positive_SELinux(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -933,8 +937,8 @@ func Test_MarkDeviceAsMounted_Positive_SELinux(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
} }
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
@ -980,8 +984,8 @@ func TestUncertainVolumeMounts(t *testing.T) {
generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec( generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec1) plugin, volumeSpec1)
require.NoError(t, err) require.NoError(t, err)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil { if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err) t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
} }
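The hunks above all follow one pattern: the test obtains a per-test logger with ktesting.NewTestContext and hands it to MarkVolumeAsAttached, which now takes an explicit klog.Logger. A minimal, self-contained sketch of that pattern (the markAttached helper and the package name are illustrative, not taken from the tree):

package contextuallogging_sketch

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// markAttached stands in for any API that now takes the logger as its first
// argument, such as ActualStateOfWorld.MarkVolumeAsAttached in the diff above.
func markAttached(logger klog.Logger, volumeName string) error {
	logger.V(4).Info("Marking volume as attached", "volumeName", volumeName)
	return nil
}

func TestLoggerPlumbing(t *testing.T) {
	// The returned logger is bound to t, so log output is attributed to the
	// test that produced it and only surfaces for failing or verbose runs.
	logger, _ := ktesting.NewTestContext(t)
	if err := markAttached(logger, "fake-volume"); err != nil {
		t.Fatalf("markAttached failed: %v", err)
	}
}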

View File

@@ -17,6 +17,7 @@ limitations under the License.
 package metrics
 import (
+"k8s.io/klog/v2/ktesting"
 "testing"
 v1 "k8s.io/api/core/v1"
@@ -74,7 +75,8 @@ func TestMetricCollection(t *testing.T) {
 // Add one volume to ActualStateOfWorld
 devicePath := "fake/device/path"
-err = asw.MarkVolumeAsAttached("", volumeSpec, "", devicePath)
+logger, _ := ktesting.NewTestContext(t)
+err = asw.MarkVolumeAsAttached(logger, "", volumeSpec, "", devicePath)
 if err != nil {
 t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
 }

View File

@@ -374,7 +374,7 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
 dswp.desiredStateOfWorld.UpdatePersistentVolumeSize(uniqueVolumeName, pvCap)
 // in case the actualStateOfWorld was rebuild after kubelet restart ensure that claimSize is set to accurate value
-dswp.actualStateOfWorld.InitializeClaimSize(uniqueVolumeName, pvcStatusCap)
+dswp.actualStateOfWorld.InitializeClaimSize(klog.TODO(), uniqueVolumeName, pvcStatusCap)
 }
 func getUniqueVolumeName(

View File

@ -17,6 +17,7 @@ limitations under the License.
package populator package populator
import ( import (
"k8s.io/klog/v2/ktesting"
"testing" "testing"
"time" "time"
@ -134,7 +135,8 @@ func TestFindAndAddNewPods_WithRescontructedVolume(t *testing.T) {
VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false), VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false),
VolumeMountState: operationexecutor.VolumeMounted, VolumeMountState: operationexecutor.VolumeMounted,
} }
dswp.actualStateOfWorld.MarkVolumeAsAttached(opts.VolumeName, opts.VolumeSpec, "fake-node", "") logger, _ := ktesting.NewTestContext(t)
dswp.actualStateOfWorld.MarkVolumeAsAttached(logger, opts.VolumeName, opts.VolumeSpec, "fake-node", "")
dswp.actualStateOfWorld.MarkVolumeAsMounted(opts) dswp.actualStateOfWorld.MarkVolumeAsMounted(opts)
dswp.findAndAddNewPods() dswp.findAndAddNewPods()
@ -1393,8 +1395,9 @@ func volumeCapacity(size int) v1.ResourceList {
} }
func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) { func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
for _, volumeToMount := range dsw.GetVolumesToMount() { for _, volumeToMount := range dsw.GetVolumesToMount() {
err := asw.MarkVolumeAsAttached(volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "") err := asw.MarkVolumeAsAttached(logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "")
if err != nil { if err != nil {
t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err) t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err)
} }

View File

@@ -233,6 +233,7 @@ func (rc *reconciler) mountAttachedVolumes(volumeToMount cache.VolumeToMount, po
 }
 func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
+logger := klog.TODO()
 if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
 //// lets not spin a goroutine and unnecessarily trigger exponential backoff if this happens
 if volumeToMount.PluginIsAttachable && !volumeToMount.ReportedInUse {
@@ -243,6 +244,7 @@ func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
 // for controller to finish attaching volume.
 klog.V(5).InfoS(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
 err := rc.operationExecutor.VerifyControllerAttachedVolume(
+logger,
 volumeToMount.VolumeToMount,
 rc.nodeName,
 rc.actualStateOfWorld)
@@ -261,7 +263,7 @@ func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
 NodeName: rc.nodeName,
 }
 klog.V(5).InfoS(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
-err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
+err := rc.operationExecutor.AttachVolume(logger, volumeToAttach, rc.actualStateOfWorld)
 if err != nil && !isExpectedError(err) {
 klog.ErrorS(err, volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error(), "pod", klog.KObj(volumeToMount.Pod))
 }
@@ -297,7 +299,7 @@ func (rc *reconciler) unmountDetachDevices() {
 // Only detach if kubelet detach is enabled
 klog.V(5).InfoS(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
 err := rc.operationExecutor.DetachVolume(
-attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
+klog.TODO(), attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
 if err != nil && !isExpectedError(err) {
 klog.ErrorS(err, attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
 }

View File

@ -40,6 +40,7 @@ import (
core "k8s.io/client-go/testing" core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@ -2425,7 +2426,7 @@ func TestSyncStates(t *testing.T) {
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths) rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
rcInstance, _ := rc.(*reconciler) rcInstance, _ := rc.(*reconciler)
logger, _ := ktesting.NewTestContext(t)
for _, tpodInfo := range tc.podInfos { for _, tpodInfo := range tc.podInfos {
pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName) pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
@ -2435,7 +2436,7 @@ func TestSyncStates(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("error adding volume %s to dsow: %v", volumeSpec.Name(), err) t.Fatalf("error adding volume %s to dsow: %v", volumeSpec.Name(), err)
} }
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "") rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
} }
rcInstance.syncStates(tmpKubeletPodDir) rcInstance.syncStates(tmpKubeletPodDir)

View File

@@ -140,7 +140,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*gl
 for _, gvl := range volumesNeedUpdate {
 err := rc.actualStateOfWorld.MarkVolumeAsAttached(
 //TODO: the devicePath might not be correct for some volume plugins: see issue #54108
-gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
+klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
 if err != nil {
 klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
 continue

View File

@@ -107,7 +107,7 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa
 for _, gvl := range reconstructedVolumes {
 err := rc.actualStateOfWorld.MarkVolumeAsAttached(
 //TODO: the devicePath might not be correct for some volume plugins: see issue #54108
-gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
+klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
 if err != nil {
 klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
 continue
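Several call sites above, in the kubelet reconciler and in the reconstruction code, pass klog.TODO() instead of a caller-supplied logger. A rough, self-contained sketch of what that placeholder means, assuming only the klog/v2 API already used in the diff:

package main

import "k8s.io/klog/v2"

func main() {
	// klog.TODO() behaves like klog.Background(): it returns the global logger.
	// Its purpose is to mark call sites that still need a logger plumbed in from
	// the caller, so later contextual-logging changes can find and fix them.
	logger := klog.TODO()
	logger.Info("reconstructed volume added to actual state of world", "volumeName", "fake-volume")
}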

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@ -205,7 +206,7 @@ func TestCleanOrphanVolumes(t *testing.T) {
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths) rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
rcInstance, _ := rc.(*reconciler) rcInstance, _ := rc.(*reconciler)
rcInstance.volumesFailedReconstruction = tc.volumesFailedReconstruction rcInstance.volumesFailedReconstruction = tc.volumesFailedReconstruction
logger, _ := ktesting.NewTestContext(t)
for _, tpodInfo := range tc.podInfos { for _, tpodInfo := range tc.podInfos {
pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName) pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
@ -215,7 +216,7 @@ func TestCleanOrphanVolumes(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err) t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err)
} }
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "") rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
} }
// Act // Act
@ -324,7 +325,8 @@ func TestReconstructVolumesMount(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err) t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err)
} }
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "") logger, _ := ktesting.NewTestContext(t)
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
rcInstance.populatorHasAddedPods = func() bool { rcInstance.populatorHasAddedPods = func() bool {
// Mark DSW populated to allow unmounting of volumes. // Mark DSW populated to allow unmounting of volumes.

View File

@@ -43,6 +43,7 @@ import (
 k8stesting "k8s.io/client-go/testing"
 "k8s.io/component-helpers/storage/volume"
 "k8s.io/klog/v2"
+"k8s.io/klog/v2/ktesting"
 "k8s.io/kubernetes/pkg/controller"
 pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
 )
@@ -150,7 +151,8 @@ type testEnv struct {
 func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv {
 client := &fake.Clientset{}
-reactor := pvtesting.NewVolumeReactor(client, nil, nil, nil)
+_, ctx := ktesting.NewTestContext(t)
+reactor := pvtesting.NewVolumeReactor(ctx, client, nil, nil, nil)
 // TODO refactor all tests to use real watch mechanism, see #72327
 client.AddWatchReactor("*", func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) {
 gvr := action.GetResource()

View File

@ -24,7 +24,6 @@ import (
"strings" "strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -62,7 +61,7 @@ type azureFileDeleter struct {
 azureProvider azureCloudProvider
 }
-func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *azureFilePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
 azure, resourceGroup, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
 if err != nil {
 klog.V(4).Infof("failed to get azure provider")
@@ -102,7 +101,7 @@ func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureU
 }
 }
-func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *azureFilePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
 azure, resourceGroup, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
 if err != nil {
 klog.V(4).Infof("failed to get azure provider")

View File

@@ -228,7 +228,7 @@ func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podU
 }}, nil
 }
-func (plugin *gcePersistentDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *gcePersistentDiskPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
 return plugin.newDeleterInternal(spec, &GCEDiskUtil{})
 }
@@ -245,7 +245,7 @@ func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, man
 }}, nil
 }
-func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *gcePersistentDiskPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
 return plugin.newProvisionerInternal(options, &GCEDiskUtil{})
 }

View File

@@ -18,6 +18,7 @@ package hostpath
 import (
 "fmt"
+"k8s.io/klog/v2"
 "os"
 "regexp"
@@ -172,11 +173,11 @@ func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRec
 return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
 }
-func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *hostPathPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
 return newDeleter(spec, plugin.host)
 }
-func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *hostPathPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
 if !plugin.config.ProvisioningEnabled {
 return nil, fmt.Errorf("provisioning in volume plugin %q is disabled", plugin.GetPluginName())
 }

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing" volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/hostutil"
@ -111,7 +112,8 @@ func TestDeleter(t *testing.T) {
if err != nil { if err != nil {
t.Fatal("Can't find the plugin by name") t.Fatal("Can't find the plugin by name")
} }
deleter, err := plug.NewDeleter(spec) logger, _ := ktesting.NewTestContext(t)
deleter, err := plug.NewDeleter(logger, spec)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Deleter: %v", err) t.Errorf("Failed to make a new Deleter: %v", err)
} }
@ -135,13 +137,13 @@ func TestDeleterTempDir(t *testing.T) {
"not-tmp": {true, "/nottmp"}, "not-tmp": {true, "/nottmp"},
"good-tmp": {false, "/tmp/scratch"}, "good-tmp": {false, "/tmp/scratch"},
} }
logger, _ := ktesting.NewTestContext(t)
for name, test := range tests { for name, test := range tests {
plugMgr := volume.VolumePluginMgr{} plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil)) plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}} spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec) plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec) deleter, _ := plug.NewDeleter(logger, spec)
err := deleter.Delete() err := deleter.Delete()
if err == nil && test.expectedFailure { if err == nil && test.expectedFailure {
t.Errorf("Expected failure for test '%s' but got nil err", name) t.Errorf("Expected failure for test '%s' but got nil err", name)
@ -167,7 +169,8 @@ func TestProvisioner(t *testing.T) {
PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
} }
creator, err := plug.NewProvisioner(options) logger, _ := ktesting.NewTestContext(t)
creator, err := plug.NewProvisioner(logger, options)
if err != nil { if err != nil {
t.Fatalf("Failed to make a new Provisioner: %v", err) t.Fatalf("Failed to make a new Provisioner: %v", err)
} }

View File

@@ -213,7 +213,7 @@ type DeletableVolumePlugin interface {
 // NewDeleter creates a new volume.Deleter which knows how to delete this
 // resource in accordance with the underlying storage provider after the
 // volume's release from a claim
-NewDeleter(spec *Spec) (Deleter, error)
+NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error)
 }
 // ProvisionableVolumePlugin is an extended interface of VolumePlugin and is
@@ -223,7 +223,7 @@ type ProvisionableVolumePlugin interface {
 // NewProvisioner creates a new volume.Provisioner which knows how to
 // create PersistentVolumes in accordance with the plugin's underlying
 // storage provider
-NewProvisioner(options VolumeOptions) (Provisioner, error)
+NewProvisioner(logger klog.Logger, options VolumeOptions) (Provisioner, error)
 }
 // AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment
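The interface change above moves logger selection to the caller: plugins no longer have to reach for the global klog functions when constructing deleters and provisioners. A toy implementation of the new NewDeleter shape, with Spec and Deleter reduced to local stand-ins so the sketch is self-contained (they are not the real volume package types):

package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

// Spec and Deleter are minimal stand-ins for volume.Spec and volume.Deleter.
type Spec struct{ Name string }

type Deleter interface{ Delete() error }

type exampleDeleter struct{ spec *Spec }

func (d *exampleDeleter) Delete() error { return nil }

type examplePlugin struct{}

// NewDeleter mirrors the updated DeletableVolumePlugin.NewDeleter signature:
// the caller supplies the logger, and with it any names/values already attached.
func (p *examplePlugin) NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error) {
	logger.V(4).Info("Creating deleter", "volume", spec.Name)
	return &exampleDeleter{spec: spec}, nil
}

func main() {
	d, err := (&examplePlugin{}).NewDeleter(klog.Background(), &Spec{Name: "pv-demo"})
	fmt.Println(d != nil, err)
}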

View File

@@ -160,7 +160,7 @@ func (plugin *portworxVolumePlugin) newUnmounterInternal(volName string, podUID
 }}, nil
 }
-func (plugin *portworxVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *portworxVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
 return plugin.newDeleterInternal(spec, plugin.util)
 }
@@ -178,7 +178,7 @@ func (plugin *portworxVolumePlugin) newDeleterInternal(spec *volume.Spec, manage
 }}, nil
 }
-func (plugin *portworxVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *portworxVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
 return plugin.newProvisionerInternal(options, plugin.util)
 }

View File

@@ -592,7 +592,7 @@ func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface,
 return "", fmt.Errorf("can't find source name from mounted path: %s", mountPath)
 }
-func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *rbdPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
 if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil {
 return nil, fmt.Errorf("spec.PersistentVolume.Spec.RBD is nil")
 }
@@ -615,7 +615,7 @@ func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret str
 }}, nil
 }
-func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *rbdPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
 return plugin.newProvisionerInternal(options, &rbdUtil{})
 }

View File

@@ -18,6 +18,7 @@ package testing
 import (
 "fmt"
+"k8s.io/klog/v2"
 "os"
 "path/filepath"
 goruntime "runtime"
@@ -441,11 +442,11 @@ func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventR
 return nil
 }
-func (plugin *FakeVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *FakeVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
 return &FakeDeleter{"/attributesTransferredFromSpec", volume.MetricsNil{}}, nil
 }
-func (plugin *FakeVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *FakeVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
 plugin.Lock()
 defer plugin.Unlock()
 plugin.LastProvisionerOptions = options

View File

@ -17,10 +17,11 @@ limitations under the License.
package operationexecutor package operationexecutor
import ( import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog/v2"
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
@ -54,11 +55,11 @@ func (f *fakeOGCounter) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume,
return f.recordFuncCall("GenerateUnmountVolumeFunc"), nil return f.recordFuncCall("GenerateUnmountVolumeFunc"), nil
} }
func (f *fakeOGCounter) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { func (f *fakeOGCounter) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
return f.recordFuncCall("GenerateAttachVolumeFunc") return f.recordFuncCall("GenerateAttachVolumeFunc")
} }
func (f *fakeOGCounter) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { func (f *fakeOGCounter) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateDetachVolumeFunc"), nil return f.recordFuncCall("GenerateDetachVolumeFunc"), nil
} }
@ -70,7 +71,7 @@ func (f *fakeOGCounter) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume,
return f.recordFuncCall("GenerateUnmountDeviceFunc"), nil return f.recordFuncCall("GenerateUnmountDeviceFunc"), nil
} }
func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateVerifyControllerAttachedVolumeFunc"), nil return f.recordFuncCall("GenerateVerifyControllerAttachedVolumeFunc"), nil
} }

View File

@@ -65,7 +65,7 @@ import (
 type OperationExecutor interface {
 // AttachVolume attaches the volume to the node specified in volumeToAttach.
 // It then updates the actual state of the world to reflect that.
-AttachVolume(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
+AttachVolume(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
 // VerifyVolumesAreAttachedPerNode verifies the given list of volumes to see whether they are still attached to the node.
 // If any volume is not attached right now, it will update the actual state of the world to reflect that.
@@ -83,7 +83,7 @@ type OperationExecutor interface {
 // that. If verifySafeToDetach is set, a call is made to the fetch the node
 // object and it is used to verify that the volume does not exist in Node's
 // Status.VolumesInUse list (operation fails with error if it is).
-DetachVolume(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
+DetachVolume(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
 // If a volume has 'Filesystem' volumeMode, MountVolume mounts the
 // volume to the pod specified in volumeToMount.
@@ -139,7 +139,7 @@ type OperationExecutor interface {
 // If the volume is not found or there is an error (fetching the node
 // object, for example) then an error is returned which triggers exponential
 // back off on retries.
-VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
+VerifyControllerAttachedVolume(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
 // IsOperationPending returns true if an operation for the given volumeName
 // and one of podName or nodeName is pending, otherwise it returns false
@@ -245,13 +245,13 @@ type ActualStateOfWorldAttacherUpdater interface {
 // TODO: in the future, we should be able to remove the volumeName
 // argument to this method -- since it is used only for attachable
 // volumes. See issue 29695.
-MarkVolumeAsAttached(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
+MarkVolumeAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
 // Marks the specified volume as *possibly* attached to the specified node.
 // If an attach operation fails, the attach/detach controller does not know for certain if the volume is attached or not.
 // If the volume name is supplied, that volume name will be used. If not, the
 // volume name is computed using the result from querying the plugin.
-MarkVolumeAsUncertain(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error
+MarkVolumeAsUncertain(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error
 // Marks the specified volume as detached from the specified node
 MarkVolumeAsDetached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
@@ -262,10 +262,10 @@ type ActualStateOfWorldAttacherUpdater interface {
 // Unmarks the desire to detach for the specified volume (add the volume back to
 // the node's volumesToReportAsAttached list)
-AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
+AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName)
 // InitializeClaimSize sets pvc claim size by reading pvc.Status.Capacity
-InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity)
+InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity)
 GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity
 }
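With logger parameters on every OperationExecutor and ActualStateOfWorldAttacherUpdater method, callers derive one logger from their context and hand it down the call chain. A hedged, self-contained sketch of that flow using only klog/v2 primitives; attachVolume is an illustrative stand-in, not the real OperationExecutor method:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// attachVolume stands in for OperationExecutor.AttachVolume after this change:
// the logger arrives as an explicit argument instead of via a package global.
func attachVolume(logger klog.Logger, volumeName string) error {
	logger.V(4).Info("Attaching volume", "volumeName", volumeName)
	return nil
}

func run(ctx context.Context) {
	// Derive the request-scoped logger once, then pass it to every operation.
	logger := klog.FromContext(ctx)
	if err := attachVolume(logger, "pv-demo"); err != nil {
		logger.Error(err, "AttachVolume failed", "volumeName", "pv-demo")
	}
}

func main() {
	ctx := klog.NewContext(context.Background(), klog.LoggerWithName(klog.Background(), "attachdetach-controller"))
	run(ctx)
}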
@@ -789,10 +789,11 @@ func (oe *operationExecutor) IsOperationSafeToRetry(
 }
 func (oe *operationExecutor) AttachVolume(
+logger klog.Logger,
 volumeToAttach VolumeToAttach,
 actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
 generatedOperations :=
-oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
+oe.operationGenerator.GenerateAttachVolumeFunc(logger, volumeToAttach, actualStateOfWorld)
 if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
 return oe.pendingOperations.Run(
@@ -804,11 +805,12 @@ func (oe *operationExecutor) AttachVolume(
 }
 func (oe *operationExecutor) DetachVolume(
+logger klog.Logger,
 volumeToDetach AttachedVolume,
 verifySafeToDetach bool,
 actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
 generatedOperations, err :=
-oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, actualStateOfWorld)
+oe.operationGenerator.GenerateDetachVolumeFunc(logger, volumeToDetach, verifySafeToDetach, actualStateOfWorld)
 if err != nil {
 return err
 }
@@ -1039,11 +1041,12 @@ func (oe *operationExecutor) ExpandInUseVolume(volumeToMount VolumeToMount, actu
 }
 func (oe *operationExecutor) VerifyControllerAttachedVolume(
+logger klog.Logger,
 volumeToMount VolumeToMount,
 nodeName types.NodeName,
 actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
 generatedOperations, err :=
-oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld)
+oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(logger, volumeToMount, nodeName, actualStateOfWorld)
 if err != nil {
 return err
 }

View File

@ -18,16 +18,18 @@ package operationexecutor
import ( import (
"fmt" "fmt"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/klog/v2"
"strconv" "strconv"
"testing" "testing"
"time" "time"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
csitrans "k8s.io/csi-translation-lib" csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/hostutil" "k8s.io/kubernetes/pkg/volume/util/hostutil"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types" volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@ -59,7 +61,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachableAndNonDevi
// Act // Act
for i := range volumesToMount { for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1)) podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithSecret(podName, secretName) pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{ volumesToMount[i] = VolumeToMount{
Pod: pod, Pod: pod,
@ -87,7 +89,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins(t *te
volumeName := v1.UniqueVolumeName(pdName) volumeName := v1.UniqueVolumeName(pdName)
// Act // Act
for i := range volumesToMount { for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1)) podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithGCEPD(podName, pdName) pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{ volumesToMount[i] = VolumeToMount{
Pod: pod, Pod: pod,
@ -114,7 +116,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForDeviceMountablePlugins(
volumeName := v1.UniqueVolumeName(pdName) volumeName := v1.UniqueVolumeName(pdName)
// Act // Act
for i := range volumesToMount { for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1)) podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithGCEPD(podName, pdName) pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{ volumesToMount[i] = VolumeToMount{
Pod: pod, Pod: pod,
@ -209,7 +211,8 @@ func TestOperationExecutor_AttachSingleNodeVolumeConcurrentlyToSameNode(t *testi
}, },
}, },
} }
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -239,7 +242,8 @@ func TestOperationExecutor_AttachMultiNodeVolumeConcurrentlyToSameNode(t *testin
}, },
}, },
} }
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -269,7 +273,8 @@ func TestOperationExecutor_AttachSingleNodeVolumeConcurrentlyToDifferentNodes(t
}, },
}, },
} }
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -297,7 +302,8 @@ func TestOperationExecutor_AttachMultiNodeVolumeConcurrentlyToDifferentNodes(t *
}, },
}, },
} }
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -327,7 +333,8 @@ func TestOperationExecutor_DetachSingleNodeVolumeConcurrentlyFromSameNode(t *tes
}, },
}, },
} }
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -357,7 +364,8 @@ func TestOperationExecutor_DetachMultiNodeVolumeConcurrentlyFromSameNode(t *test
}, },
}, },
} }
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -385,7 +393,8 @@ func TestOperationExecutor_DetachMultiNodeVolumeConcurrentlyFromDifferentNodes(t
}, },
}, },
} }
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
} }
// Assert // Assert
@ -440,7 +449,8 @@ func TestOperationExecutor_VerifyControllerAttachedVolumeConcurrently(t *testing
volumesToMount[i] = VolumeToMount{ volumesToMount[i] = VolumeToMount{
VolumeName: v1.UniqueVolumeName(pdName), VolumeName: v1.UniqueVolumeName(pdName),
} }
oe.VerifyControllerAttachedVolume(volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */) logger, _ := ktesting.NewTestContext(t)
oe.VerifyControllerAttachedVolume(logger, volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */)
} }
// Assert // Assert
@ -460,7 +470,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins_Vo
// Act // Act
for i := range volumesToMount { for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1)) podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithSecret(podName, secretName) pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{ volumesToMount[i] = VolumeToMount{
Pod: pod, Pod: pod,
@ -491,7 +501,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins_Volum
// Act // Act
for i := range volumesToMount { for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1)) podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithGCEPD(podName, pdName) pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{ volumesToMount[i] = VolumeToMount{
Pod: pod, Pod: pod,
@ -603,7 +613,7 @@ func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount Mo
OperationFunc: opFunc, OperationFunc: opFunc,
}, nil }, nil
} }
func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
opFunc := func() volumetypes.OperationContext { opFunc := func() volumetypes.OperationContext {
startOperationAndBlock(fopg.ch, fopg.quit) startOperationAndBlock(fopg.ch, fopg.quit)
return volumetypes.NewOperationContext(nil, nil, false) return volumetypes.NewOperationContext(nil, nil, false)
@ -612,7 +622,7 @@ func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach Volu
OperationFunc: opFunc, OperationFunc: opFunc,
} }
} }
func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() volumetypes.OperationContext { opFunc := func() volumetypes.OperationContext {
startOperationAndBlock(fopg.ch, fopg.quit) startOperationAndBlock(fopg.ch, fopg.quit)
return volumetypes.NewOperationContext(nil, nil, false) return volumetypes.NewOperationContext(nil, nil, false)
@ -639,7 +649,7 @@ func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach Att
OperationFunc: opFunc, OperationFunc: opFunc,
}, nil }, nil
} }
func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() volumetypes.OperationContext { opFunc := func() volumetypes.OperationContext {
startOperationAndBlock(fopg.ch, fopg.quit) startOperationAndBlock(fopg.ch, fopg.quit)
return volumetypes.NewOperationContext(nil, nil, false) return volumetypes.NewOperationContext(nil, nil, false)

View File

@@ -121,10 +121,10 @@ type OperationGenerator interface {
 GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error)
 // Generates the AttachVolume function needed to perform attach of a volume plugin
-GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations
+GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations
 // Generates the DetachVolume function needed to perform the detach of a volume plugin
-GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
+GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
 // Generates the VolumesAreAttached function needed to verify if volume plugins are attached
 GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
@@ -133,7 +133,7 @@ type OperationGenerator interface {
 GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter hostutil.HostUtils) (volumetypes.GeneratedOperations, error)
 // Generates the function needed to check if the attach_detach controller has attached the volume plugin
-GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
+GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
 // Generates the MapVolume function needed to perform the map of a volume plugin
 GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error)
@ -348,6 +348,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
} }
func (og *operationGenerator) GenerateAttachVolumeFunc( func (og *operationGenerator) GenerateAttachVolumeFunc(
logger klog.Logger,
volumeToAttach VolumeToAttach, volumeToAttach VolumeToAttach,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
@ -378,6 +379,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
uncertainNode = derr.CurrentNode uncertainNode = derr.CurrentNode
} }
addErr := actualStateOfWorld.MarkVolumeAsUncertain( addErr := actualStateOfWorld.MarkVolumeAsUncertain(
logger,
volumeToAttach.VolumeName, volumeToAttach.VolumeName,
volumeToAttach.VolumeSpec, volumeToAttach.VolumeSpec,
uncertainNode) uncertainNode)
@ -399,7 +401,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
// Update actual state of world // Update actual state of world
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) logger, v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath)
if addVolumeNodeErr != nil { if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry. // On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
@ -447,6 +449,7 @@ func (og *operationGenerator) GetCSITranslator() InTreeToCSITranslator {
} }
func (og *operationGenerator) GenerateDetachVolumeFunc( func (og *operationGenerator) GenerateDetachVolumeFunc(
logger klog.Logger,
volumeToDetach AttachedVolume, volumeToDetach AttachedVolume,
verifySafeToDetach bool, verifySafeToDetach bool,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
@ -505,7 +508,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
if err != nil { if err != nil {
// On failure, add volume back to ReportAsAttached list // On failure, add volume back to ReportAsAttached list
actualStateOfWorld.AddVolumeToReportAsAttached( actualStateOfWorld.AddVolumeToReportAsAttached(
volumeToDetach.VolumeName, volumeToDetach.NodeName) logger, volumeToDetach.VolumeName, volumeToDetach.NodeName)
eventErr, detailedErr := volumeToDetach.GenerateError("DetachVolume.Detach failed", err) eventErr, detailedErr := volumeToDetach.GenerateError("DetachVolume.Detach failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
} }
@ -1501,6 +1504,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
} }
func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
logger klog.Logger,
volumeToMount VolumeToMount, volumeToMount VolumeToMount,
nodeName types.NodeName, nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
@ -1548,13 +1552,13 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
// updated accordingly. // updated accordingly.
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */) logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */)
if addVolumeNodeErr != nil { if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry. // On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr) eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
} }
actualStateOfWorld.InitializeClaimSize(volumeToMount.VolumeName, claimSize) actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize)
return volumetypes.NewOperationContext(nil, nil, migrated) return volumetypes.NewOperationContext(nil, nil, migrated)
} }
@@ -1588,14 +1592,14 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
     for _, attachedVolume := range node.Status.VolumesAttached {
         if attachedVolume.Name == volumeToMount.VolumeName {
             addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
-                v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
+                logger, v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
             klog.InfoS(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath)), "pod", klog.KObj(volumeToMount.Pod))
             if addVolumeNodeErr != nil {
                 // On failure, return error. Caller will log and retry.
                 eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
                 return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
             }
-            actualStateOfWorld.InitializeClaimSize(volumeToMount.VolumeName, claimSize)
+            actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize)
             return volumetypes.NewOperationContext(nil, nil, migrated)
         }
     }
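The net effect of the two signature changes above is that callers now hand the operation generator an explicit logger instead of relying on global klog state. A minimal sketch of that calling pattern, assuming the existing operationexecutor types and the usual k8s.io/klog/v2 import; the wrapper function and the "detach" logger name are illustrative, not part of this commit:

// Illustrative only: derive a named logger from the caller's context once and
// pass it explicitly into the updated generator function.
func generateDetach(ctx context.Context, og OperationGenerator, vol AttachedVolume, asw ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
    logger := klog.LoggerWithName(klog.FromContext(ctx), "detach")
    return og.GenerateDetachVolumeFunc(logger, vol, true /* verifySafeToDetach */, asw)
}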

View File

@@ -324,7 +324,7 @@ type vsphereVolumeDeleter struct {
 var _ volume.Deleter = &vsphereVolumeDeleter{}
-func (plugin *vsphereVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
+func (plugin *vsphereVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
     return plugin.newDeleterInternal(spec, &VsphereDiskUtil{})
 }
@@ -353,7 +353,7 @@ type vsphereVolumeProvisioner struct {
 var _ volume.Provisioner = &vsphereVolumeProvisioner{}
-func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
+func (plugin *vsphereVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
     return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
 }
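These vSphere changes only thread the new logger parameter through the plugin constructors; the bodies are untouched. Purely as a hedged illustration of what the parameter enables (the added V(4) log line below is not in this commit), a method receiving the logger could log through it rather than through package-level klog calls:

// Hypothetical use of the injected logger inside the deleter constructor.
func (plugin *vsphereVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
    logger.V(4).Info("Creating vSphere volume deleter", "volume", spec.Name())
    return plugin.newDeleterInternal(spec, &VsphereDiskUtil{})
}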

View File

@@ -31,6 +31,7 @@ import (
     restclient "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/cache"
     fakecloud "k8s.io/cloud-provider/fake"
+    "k8s.io/klog/v2/ktesting"
     kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
     volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -161,7 +162,6 @@ func TestPodDeletionWithDswp(t *testing.T) {
     defer framework.DeleteNamespaceOrDie(testClient, ns, t)
     pod := fakePodWithVol(namespaceName)
-    podStopCh := make(chan struct{})
     if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
         t.Fatalf("Failed to created node : %v", err)
@@ -183,7 +183,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
     go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
     go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
     initCSIObjects(ctx.Done(), informers)
-    go ctrl.Run(ctx.Done())
+    go ctrl.Run(ctx)
     // Run pvCtrl to avoid leaking goroutines started during its creation.
     go pvCtrl.Run(ctx)
@@ -201,7 +201,6 @@ func TestPodDeletionWithDswp(t *testing.T) {
     waitForPodsInDSWP(t, ctrl.GetDesiredStateOfWorld())
     // let's stop pod events from getting triggered
-    close(podStopCh)
     err = podInformer.GetStore().Delete(podInformerObj)
     if err != nil {
         t.Fatalf("Error deleting pod : %v", err)
@@ -262,7 +261,7 @@ func TestPodUpdateWithWithADC(t *testing.T) {
     go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
     go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
     initCSIObjects(ctx.Done(), informers)
-    go ctrl.Run(ctx.Done())
+    go ctrl.Run(ctx)
     // Run pvCtrl to avoid leaking goroutines started during its creation.
     go pvCtrl.Run(ctx)
@@ -335,7 +334,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
     go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
     go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
     initCSIObjects(ctx.Done(), informers)
-    go ctrl.Run(ctx.Done())
+    go ctrl.Run(ctx)
     // Run pvCtrl to avoid leaking goroutines started during its creation.
     go pvCtrl.Run(ctx)
@@ -426,7 +425,9 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync
     plugins := []volume.VolumePlugin{plugin}
     cloud := &fakecloud.Cloud{}
     informers := clientgoinformers.NewSharedInformerFactory(testClient, resyncPeriod)
+    logger, ctx := ktesting.NewTestContext(t)
     ctrl, err := attachdetach.NewAttachDetachController(
+        logger,
         testClient,
         informers.Core().V1().Pods(),
         informers.Core().V1().Nodes(),
@@ -463,7 +464,7 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync
         NodeInformer: informers.Core().V1().Nodes(),
         EnableDynamicProvisioning: false,
     }
-    pvCtrl, err := persistentvolume.NewController(params)
+    pvCtrl, err := persistentvolume.NewController(ctx, params)
     if err != nil {
         t.Fatalf("Failed to create PV controller: %v", err)
     }
@@ -509,14 +510,15 @@ func TestPodAddedByDswp(t *testing.T) {
     go podInformer.Run(podStopCh)
     // start controller loop
-    ctx, cancel := context.WithCancel(context.Background())
+    _, ctx := ktesting.NewTestContext(t)
+    ctx, cancel := context.WithCancel(ctx)
     defer cancel()
     go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done())
     go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
     go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
     initCSIObjects(ctx.Done(), informers)
-    go ctrl.Run(ctx.Done())
+    go ctrl.Run(ctx)
     // Run pvCtrl to avoid leaking goroutines started during its creation.
     go pvCtrl.Run(ctx)
@@ -605,7 +607,7 @@ func TestPVCBoundWithADC(t *testing.T) {
     informers.Start(ctx.Done())
     informers.WaitForCacheSync(ctx.Done())
     initCSIObjects(ctx.Done(), informers)
-    go ctrl.Run(ctx.Done())
+    go ctrl.Run(ctx)
     go pvCtrl.Run(ctx)
     waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4)
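The test updates above all follow the same ktesting pattern: each test builds its own logger and context so controller output is attached to the test that produced it, and Run now takes the context rather than a stop channel. A condensed sketch of that setup, with newTestController standing in as a hypothetical helper for the controller construction shown in createAdClients:

// Per-test contextual logging setup; ktesting routes log output through t.
func TestControllerWithContextualLogging(t *testing.T) {
    logger, ctx := ktesting.NewTestContext(t)
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    ctrl := newTestController(t, logger) // hypothetical helper, not from this commit
    logger.Info("Starting controller under test")
    go ctrl.Run(ctx)
}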

View File

@@ -47,6 +47,7 @@ import (
     "k8s.io/kubernetes/test/integration/framework"
     "k8s.io/klog/v2"
+    "k8s.io/klog/v2/ktesting"
 )
 // Several tests in this file are configurable by environment variables:
@@ -1357,7 +1358,9 @@ func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.T
     plugins := []volume.VolumePlugin{plugin}
     cloud := &fakecloud.Cloud{}
     informers := informers.NewSharedInformerFactory(testClient, getSyncPeriod(syncPeriod))
+    _, ctx := ktesting.NewTestContext(t)
     ctrl, err := persistentvolumecontroller.NewController(
+        ctx,
         persistentvolumecontroller.ControllerParameters{
             KubeClient: binderClient,
             SyncPeriod: getSyncPeriod(syncPeriod),

View File

@@ -28,6 +28,7 @@ import (
     "time"
     "k8s.io/klog/v2"
+    "k8s.io/klog/v2/ktesting"
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
@@ -1128,8 +1129,8 @@ func initPVController(t *testing.T, testCtx *testutil.TestContext, provisionDela
         NodeInformer: informerFactory.Core().V1().Nodes(),
         EnableDynamicProvisioning: true,
     }
-    ctrl, err := persistentvolume.NewController(params)
+    _, ctx := ktesting.NewTestContext(t)
+    ctrl, err := persistentvolume.NewController(ctx, params)
     if err != nil {
         return nil, nil, err
     }
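On the receiving side, a constructor that now takes a ctx (as persistentvolume.NewController does after this change) can recover the contextual logger with klog.FromContext. A self-contained sketch of that pattern, not the actual controller code:

package example

import (
    "context"

    "k8s.io/klog/v2"
)

type Controller struct {
    logger klog.Logger
}

// NewController pulls the contextual logger out of ctx; callers that wrapped the
// context with klog.NewContext and klog.LoggerWithName get a named logger back.
func NewController(ctx context.Context) *Controller {
    logger := klog.FromContext(ctx)
    logger.V(2).Info("Constructing controller")
    return &Controller{logger: logger}
}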