diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 59cc5035431..3fa0cf6f742 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -271,7 +271,8 @@ func startPersistentVolumeBinderController(ctx context.Context, controllerContex EnableDynamicProvisioning: controllerContext.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration.EnableDynamicProvisioning, FilteredDialOptions: filteredDialOptions, } - volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params) + ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-binder-controller")) + volumeController, volumeControllerErr := persistentvolumecontroller.NewController(ctx, params) if volumeControllerErr != nil { return nil, true, fmt.Errorf("failed to construct persistentvolume controller: %v", volumeControllerErr) } @@ -295,8 +296,11 @@ func startAttachDetachController(ctx context.Context, controllerContext Controll return nil, true, err } + logger := klog.LoggerWithName(klog.FromContext(ctx), "attachdetach-controller") + ctx = klog.NewContext(ctx, logger) attachDetachController, attachDetachControllerErr := attachdetach.NewAttachDetachController( + logger, controllerContext.ClientBuilder.ClientOrDie("attachdetach-controller"), controllerContext.InformerFactory.Core().V1().Pods(), controllerContext.InformerFactory.Core().V1().Nodes(), @@ -316,7 +320,7 @@ func startAttachDetachController(ctx context.Context, controllerContext Controll if attachDetachControllerErr != nil { return nil, true, fmt.Errorf("failed to start attach/detach controller: %v", attachDetachControllerErr) } - go attachDetachController.Run(ctx.Done()) + go attachDetachController.Run(ctx) return nil, true, nil } @@ -346,12 +350,14 @@ func startVolumeExpandController(ctx context.Context, controllerContext Controll if expandControllerErr != nil { return nil, true, 
fmt.Errorf("failed to start volume expand controller: %v", expandControllerErr) } + ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-expander-controller")) go expandController.Run(ctx) return nil, true, nil } func startEphemeralVolumeController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { + ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "ephemeral-volume-controller")) ephemeralController, err := ephemeral.NewController( controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"), controllerContext.InformerFactory.Core().V1().Pods(), @@ -548,7 +554,9 @@ func startGarbageCollectorController(ctx context.Context, controllerContext Cont } func startPVCProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { + ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolumeclaim-protection-controller")) pvcProtectionController, err := pvcprotection.NewPVCProtectionController( + klog.FromContext(ctx), controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(), controllerContext.InformerFactory.Core().V1().Pods(), controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"), @@ -561,7 +569,9 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control } func startPVProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { + ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-protection-controller")) go pvprotection.NewPVProtectionController( + klog.FromContext(ctx), controllerContext.InformerFactory.Core().V1().PersistentVolumes(), controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"), ).Run(ctx, 1) diff --git a/pkg/controller/resourceclaim/controller.go b/pkg/controller/resourceclaim/controller.go index 
811e2dfa63e..85930e8e762 100644 --- a/pkg/controller/resourceclaim/controller.go +++ b/pkg/controller/resourceclaim/controller.go @@ -384,7 +384,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1. } func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) error { - logger := klog.LoggerWithValues(klog.FromContext(ctx), "claim", klog.KRef(namespace, name)) + logger := klog.LoggerWithValues(klog.FromContext(ctx), "resourceClaim", klog.KRef(namespace, name)) ctx = klog.NewContext(ctx, logger) claim, err := ec.claimLister.ResourceClaims(namespace).Get(name) if err != nil { diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller.go b/pkg/controller/volume/attachdetach/attach_detach_controller.go index 967f40f172b..5f61f227019 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller.go @@ -19,6 +19,7 @@ limitations under the License. package attachdetach import ( + "context" "fmt" "net" "time" @@ -99,12 +100,13 @@ var DefaultTimerConfig = TimerConfig{ // AttachDetachController defines the operations supported by this controller. type AttachDetachController interface { - Run(stopCh <-chan struct{}) + Run(ctx context.Context) GetDesiredStateOfWorld() cache.DesiredStateOfWorld } // NewAttachDetachController returns a new instance of AttachDetachController. 
func NewAttachDetachController( + logger klog.Logger, kubeClient clientset.Interface, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, @@ -194,9 +196,15 @@ func NewAttachDetachController( adc.intreeToCSITranslator) podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{ - AddFunc: adc.podAdd, - UpdateFunc: adc.podUpdate, - DeleteFunc: adc.podDelete, + AddFunc: func(obj interface{}) { + adc.podAdd(logger, obj) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + adc.podUpdate(logger, oldObj, newObj) + }, + DeleteFunc: func(obj interface{}) { + adc.podDelete(logger, obj) + }, }) // This custom indexer will index pods by its PVC keys. Then we don't need @@ -206,9 +214,15 @@ func NewAttachDetachController( } nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{ - AddFunc: adc.nodeAdd, - UpdateFunc: adc.nodeUpdate, - DeleteFunc: adc.nodeDelete, + AddFunc: func(obj interface{}) { + adc.nodeAdd(logger, obj) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + adc.nodeUpdate(logger, oldObj, newObj) + }, + DeleteFunc: func(obj interface{}) { + adc.nodeDelete(logger, obj) + }, }) pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{ @@ -316,7 +330,7 @@ type attachDetachController struct { filteredDialOptions *proxyutil.FilteredDialOptions } -func (adc *attachDetachController) Run(stopCh <-chan struct{}) { +func (adc *attachDetachController) Run(ctx context.Context) { defer runtime.HandleCrash() defer adc.pvcQueue.ShutDown() @@ -325,8 +339,9 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) { adc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: adc.kubeClient.CoreV1().Events("")}) defer adc.broadcaster.Shutdown() - klog.Infof("Starting attach detach controller") - defer klog.Infof("Shutting down attach detach controller") + logger := klog.FromContext(ctx) + logger.Info("Starting attach detach controller") + defer logger.Info("Shutting down attach 
detach controller") synced := []kcache.InformerSynced{adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced} if adc.csiNodeSynced != nil { @@ -339,21 +354,21 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) { synced = append(synced, adc.volumeAttachmentSynced) } - if !kcache.WaitForNamedCacheSync("attach detach", stopCh, synced...) { + if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), synced...) { return } - err := adc.populateActualStateOfWorld() + err := adc.populateActualStateOfWorld(logger) if err != nil { - klog.Errorf("Error populating the actual state of world: %v", err) + logger.Error(err, "Error populating the actual state of world") } - err = adc.populateDesiredStateOfWorld() + err = adc.populateDesiredStateOfWorld(logger) if err != nil { - klog.Errorf("Error populating the desired state of world: %v", err) + logger.Error(err, "Error populating the desired state of world") } - go adc.reconciler.Run(stopCh) - go adc.desiredStateOfWorldPopulator.Run(stopCh) - go wait.Until(adc.pvcWorker, time.Second, stopCh) + go adc.reconciler.Run(ctx) + go adc.desiredStateOfWorldPopulator.Run(ctx) + go wait.UntilWithContext(ctx, adc.pvcWorker, time.Second) metrics.Register(adc.pvcLister, adc.pvLister, adc.podLister, @@ -363,11 +378,11 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) { adc.csiMigratedPluginManager, adc.intreeToCSITranslator) - <-stopCh + <-ctx.Done() } -func (adc *attachDetachController) populateActualStateOfWorld() error { - klog.V(5).Infof("Populating ActualStateOfworld") +func (adc *attachDetachController) populateActualStateOfWorld(logger klog.Logger) error { + logger.V(5).Info("Populating ActualStateOfworld") nodes, err := adc.nodeLister.List(labels.Everything()) if err != nil { return err @@ -382,18 +397,18 @@ func (adc *attachDetachController) populateActualStateOfWorld() error { // volume spec is not needed to detach a volume. 
If the volume is used by a pod, it // its spec can be: this would happen during in the populateDesiredStateOfWorld which // scans the pods and updates their volumes in the ActualStateOfWorld too. - err = adc.actualStateOfWorld.MarkVolumeAsAttached(uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath) + err = adc.actualStateOfWorld.MarkVolumeAsAttached(logger, uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath) if err != nil { - klog.Errorf("Failed to mark the volume as attached: %v", err) + logger.Error(err, "Failed to mark the volume as attached") continue } - adc.processVolumesInUse(nodeName, node.Status.VolumesInUse) + adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse) adc.addNodeToDswp(node, types.NodeName(node.Name)) } } - err = adc.processVolumeAttachments() + err = adc.processVolumeAttachments(logger) if err != nil { - klog.Errorf("Failed to process volume attachments: %v", err) + logger.Error(err, "Failed to process volume attachments") } return err } @@ -420,8 +435,8 @@ func (adc *attachDetachController) getNodeVolumeDevicePath( return devicePath, err } -func (adc *attachDetachController) populateDesiredStateOfWorld() error { - klog.V(5).Infof("Populating DesiredStateOfworld") +func (adc *attachDetachController) populateDesiredStateOfWorld(logger klog.Logger) error { + logger.V(5).Info("Populating DesiredStateOfworld") pods, err := adc.podLister.List(labels.Everything()) if err != nil { @@ -429,56 +444,52 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error { } for _, pod := range pods { podToAdd := pod - adc.podAdd(podToAdd) + adc.podAdd(logger, podToAdd) for _, podVolume := range podToAdd.Spec.Volumes { nodeName := types.NodeName(podToAdd.Spec.NodeName) // The volume specs present in the ActualStateOfWorld are nil, let's replace those // with the correct ones found on pods. The present in the ASW with no corresponding // pod will be detached and the spec is irrelevant. 
- volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator) + volumeSpec, err := util.CreateVolumeSpec(logger, podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator) if err != nil { - klog.Errorf( - "Error creating spec for volume %q, pod %q/%q: %v", - podVolume.Name, - podToAdd.Namespace, - podToAdd.Name, - err) + logger.Error( + err, + "Error creating spec for volume of pod", + "pod", klog.KObj(podToAdd), + "volumeName", podVolume.Name) continue } plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) if err != nil || plugin == nil { - klog.V(10).Infof( - "Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v", - podVolume.Name, - podToAdd.Namespace, - podToAdd.Name, - err) + logger.V(10).Info( + "Skipping volume for pod: it does not implement attacher interface", + "pod", klog.KObj(podToAdd), + "volumeName", podVolume.Name, + "err", err) continue } volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) if err != nil { - klog.Errorf( - "Failed to find unique name for volume %q, pod %q/%q: %v", - podVolume.Name, - podToAdd.Namespace, - podToAdd.Name, - err) + logger.Error( + err, + "Failed to find unique name for volume of pod", + "pod", klog.KObj(podToAdd), + "volumeName", podVolume.Name) continue } attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName) if attachState == cache.AttachStateAttached { - klog.V(10).Infof("Volume %q is attached to node %q. Marking as attached in ActualStateOfWorld", - volumeName, - nodeName, - ) + logger.V(10).Info("Volume is attached to node. 
Marking as attached in ActualStateOfWorld", + "node", klog.KRef("", string(nodeName)), + "volumeName", volumeName) devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName) if err != nil { - klog.Errorf("Failed to find device path: %v", err) + logger.Error(err, "Failed to find device path") continue } - err = adc.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath) + err = adc.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, devicePath) if err != nil { - klog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err) + logger.Error(err, "Failed to update volume spec for node", "node", klog.KRef("", string(nodeName))) } } } @@ -487,7 +498,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error { return nil } -func (adc *attachDetachController) podAdd(obj interface{}) { +func (adc *attachDetachController) podAdd(logger klog.Logger, obj interface{}) { pod, ok := obj.(*v1.Pod) if pod == nil || !ok { return @@ -502,7 +513,7 @@ func (adc *attachDetachController) podAdd(obj interface{}) { adc.desiredStateOfWorld, true /* default volume action */) - util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */ + util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */ adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator) } @@ -511,7 +522,7 @@ func (adc *attachDetachController) GetDesiredStateOfWorld() cache.DesiredStateOf return adc.desiredStateOfWorld } -func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) { +func (adc *attachDetachController) podUpdate(logger klog.Logger, oldObj, newObj interface{}) { pod, ok := newObj.(*v1.Pod) if pod == nil || !ok { return @@ -526,21 +537,21 @@ func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) { adc.desiredStateOfWorld, true /* default volume action */) - util.ProcessPodVolumes(pod, 
volumeActionFlag, /* addVolumes */ + util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */ adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator) } -func (adc *attachDetachController) podDelete(obj interface{}) { +func (adc *attachDetachController) podDelete(logger klog.Logger, obj interface{}) { pod, ok := obj.(*v1.Pod) if pod == nil || !ok { return } - util.ProcessPodVolumes(pod, false, /* addVolumes */ + util.ProcessPodVolumes(logger, pod, false, /* addVolumes */ adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator) } -func (adc *attachDetachController) nodeAdd(obj interface{}) { +func (adc *attachDetachController) nodeAdd(logger klog.Logger, obj interface{}) { node, ok := obj.(*v1.Node) // TODO: investigate if nodeName is empty then if we can return // kubernetes/kubernetes/issues/37777 @@ -548,15 +559,15 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) { return } nodeName := types.NodeName(node.Name) - adc.nodeUpdate(nil, obj) + adc.nodeUpdate(logger, nil, obj) // kubernetes/kubernetes/issues/37586 // This is to workaround the case when a node add causes to wipe out // the attached volumes field. This function ensures that we sync with // the actual status. 
- adc.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) + adc.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName) } -func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) { +func (adc *attachDetachController) nodeUpdate(logger klog.Logger, oldObj, newObj interface{}) { node, ok := newObj.(*v1.Node) // TODO: investigate if nodeName is empty then if we can return if node == nil || !ok { @@ -565,10 +576,10 @@ func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) { nodeName := types.NodeName(node.Name) adc.addNodeToDswp(node, nodeName) - adc.processVolumesInUse(nodeName, node.Status.VolumesInUse) + adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse) } -func (adc *attachDetachController) nodeDelete(obj interface{}) { +func (adc *attachDetachController) nodeDelete(logger klog.Logger, obj interface{}) { node, ok := obj.(*v1.Node) if node == nil || !ok { return @@ -577,10 +588,10 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) { nodeName := types.NodeName(node.Name) if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil { // This might happen during drain, but we still want it to appear in our logs - klog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err) + logger.Info("Error removing node from desired-state-of-world", "node", klog.KObj(node), "err", err) } - adc.processVolumesInUse(nodeName, node.Status.VolumesInUse) + adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse) } func (adc *attachDetachController) enqueuePVC(obj interface{}) { @@ -593,19 +604,19 @@ func (adc *attachDetachController) enqueuePVC(obj interface{}) { } // pvcWorker processes items from pvcQueue -func (adc *attachDetachController) pvcWorker() { - for adc.processNextItem() { +func (adc *attachDetachController) pvcWorker(ctx context.Context) { + for adc.processNextItem(klog.FromContext(ctx)) { } } -func (adc *attachDetachController) processNextItem() bool { 
+func (adc *attachDetachController) processNextItem(logger klog.Logger) bool { keyObj, shutdown := adc.pvcQueue.Get() if shutdown { return false } defer adc.pvcQueue.Done(keyObj) - if err := adc.syncPVCByKey(keyObj.(string)); err != nil { + if err := adc.syncPVCByKey(logger, keyObj.(string)); err != nil { // Rather than wait for a full resync, re-add the key to the // queue to be processed. adc.pvcQueue.AddRateLimited(keyObj) @@ -619,16 +630,16 @@ func (adc *attachDetachController) processNextItem() bool { return true } -func (adc *attachDetachController) syncPVCByKey(key string) error { - klog.V(5).Infof("syncPVCByKey[%s]", key) +func (adc *attachDetachController) syncPVCByKey(logger klog.Logger, key string) error { + logger.V(5).Info("syncPVCByKey", "pvcKey", key) namespace, name, err := kcache.SplitMetaNamespaceKey(key) if err != nil { - klog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err) + logger.V(4).Info("Error getting namespace & name of pvc to get pvc from informer", "pvcKey", key, "err", err) return nil } pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name) if apierrors.IsNotFound(err) { - klog.V(4).Infof("error getting pvc %q from informer: %v", key, err) + logger.V(4).Info("Error getting pvc from informer", "pvcKey", key, "err", err) return nil } if err != nil { @@ -658,7 +669,7 @@ func (adc *attachDetachController) syncPVCByKey(key string) error { adc.desiredStateOfWorld, true /* default volume action */) - util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */ + util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */ adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator) } return nil @@ -669,8 +680,8 @@ func (adc *attachDetachController) syncPVCByKey(key string) error { // corresponding volume in the actual state of the world to indicate that it is // mounted. 
func (adc *attachDetachController) processVolumesInUse( - nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) { - klog.V(4).Infof("processVolumesInUse for node %q", nodeName) + logger klog.Logger, nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) { + logger.V(4).Info("processVolumesInUse for node", "node", klog.KRef("", string(nodeName))) for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) { mounted := false for _, volumeInUse := range volumesInUse { @@ -679,11 +690,14 @@ func (adc *attachDetachController) processVolumesInUse( break } } - err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted) + err := adc.actualStateOfWorld.SetVolumeMountedByNode(logger, attachedVolume.VolumeName, nodeName, mounted) if err != nil { - klog.Warningf( - "SetVolumeMountedByNode(%q, %q, %v) returned an error: %v", - attachedVolume.VolumeName, nodeName, mounted, err) + logger.Info( + "SetVolumeMountedByNode returned an error", + "node", klog.KRef("", string(nodeName)), + "volumeName", attachedVolume.VolumeName, + "mounted", mounted, + "err", err) } } } @@ -696,10 +710,10 @@ func (adc *attachDetachController) processVolumesInUse( // // if yes, the reconciler will attempt attach on the volume; // if not (could be a dangling attachment), the reconciler will detach this volume. -func (adc *attachDetachController) processVolumeAttachments() error { +func (adc *attachDetachController) processVolumeAttachments(logger klog.Logger) error { vas, err := adc.volumeAttachmentLister.List(labels.Everything()) if err != nil { - klog.Errorf("failed to list VolumeAttachment objects: %v", err) + logger.Error(err, "Failed to list VolumeAttachment objects") return err } for _, va := range vas { @@ -707,13 +721,12 @@ func (adc *attachDetachController) processVolumeAttachments() error { pvName := va.Spec.Source.PersistentVolumeName if pvName == nil { // Currently VA objects are created for CSI volumes only. 
nil pvName is unexpected, generate a warning - klog.Warningf("Skipping the va as its pvName is nil, va.Name: %q, nodeName: %q", - va.Name, nodeName) + logger.Info("Skipping the va as its pvName is nil", "node", klog.KRef("", string(nodeName)), "vaName", va.Name) continue } pv, err := adc.pvLister.Get(*pvName) if err != nil { - klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err) + logger.Error(err, "Unable to lookup pv object", "PV", klog.KRef("", *pvName)) continue } @@ -730,13 +743,7 @@ func (adc *attachDetachController) processVolumeAttachments() error { // podNamespace is not needed here for Azurefile as the volumeName generated will be the same with or without podNamespace volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, "" /* podNamespace */, adc.intreeToCSITranslator) if err != nil { - klog.Errorf( - "Failed to translate intree volumeSpec to CSI volumeSpec for volume:%q, va.Name:%q, nodeName:%q: %s. Error: %v", - *pvName, - va.Name, - nodeName, - inTreePluginName, - err) + logger.Error(err, "Failed to translate intree volumeSpec to CSI volumeSpec for volume", "node", klog.KRef("", string(nodeName)), "inTreePluginName", inTreePluginName, "vaName", va.Name, "PV", klog.KRef("", *pvName)) continue } } @@ -746,32 +753,22 @@ func (adc *attachDetachController) processVolumeAttachments() error { plugin, err = adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) if err != nil || plugin == nil { // Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning - klog.Warningf( - "Skipping processing the volume %q on nodeName: %q, no attacher interface found. 
err=%v", - *pvName, - nodeName, - err) + logger.Info("Skipping processing the volume on node, no attacher interface found", "node", klog.KRef("", string(nodeName)), "PV", klog.KRef("", *pvName), "err", err) continue } } volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec) if err != nil { - klog.Errorf( - "Failed to find unique name for volume:%q, va.Name:%q, nodeName:%q: %v", - *pvName, - va.Name, - nodeName, - err) + logger.Error(err, "Failed to find unique name for volume", "node", klog.KRef("", string(nodeName)), "vaName", va.Name, "PV", klog.KRef("", *pvName)) continue } attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName) if attachState == cache.AttachStateDetached { - klog.V(1).Infof("Marking volume attachment as uncertain as volume:%q (%q) is not attached (%v)", - volumeName, nodeName, attachState) - err = adc.actualStateOfWorld.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName) + logger.V(1).Info("Marking volume attachment as uncertain as volume is not attached", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName, "attachState", attachState) + err = adc.actualStateOfWorld.MarkVolumeAsUncertain(logger, volumeName, volumeSpec, nodeName) if err != nil { - klog.Errorf("MarkVolumeAsUncertain fail to add the volume %q (%q) to ASW. 
err: %s", volumeName, nodeName, err) + logger.Error(err, "MarkVolumeAsUncertain fail to add the volume to ASW", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName) } } } @@ -887,7 +884,7 @@ func (adc *attachDetachController) GetServiceAccountTokenFunc() func(_, _ string func (adc *attachDetachController) DeleteServiceAccountTokenFunc() func(types.UID) { return func(types.UID) { - klog.Errorf("DeleteServiceAccountToken unsupported in attachDetachController") + klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in attachDetachController") } } diff --git a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go index 31eb6939580..5fa92094d42 100644 --- a/pkg/controller/volume/attachdetach/attach_detach_controller_test.go +++ b/pkg/controller/volume/attachdetach/attach_detach_controller_test.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" kcache "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" @@ -47,7 +48,9 @@ func Test_NewAttachDetachController_Positive(t *testing.T) { informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) // Act + logger, _ := ktesting.NewTestContext(t) _, err := NewAttachDetachController( + logger, fakeKubeClient, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Nodes(), @@ -107,12 +110,13 @@ func Test_AttachDetachControllerStateOfWolrdPopulators_Positive(t *testing.T) { adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr) adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr) - err := adc.populateActualStateOfWorld() + logger, _ := ktesting.NewTestContext(t) + err := adc.populateActualStateOfWorld(logger) if 
err != nil { t.Fatalf("Run failed with error. Expected: Actual: <%v>", err) } - err = adc.populateDesiredStateOfWorld() + err = adc.populateDesiredStateOfWorld(logger) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } @@ -172,7 +176,11 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 var podsNum, extraPodsNum, nodesNum, i int // Create the controller + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() adcObj, err := NewAttachDetachController( + logger, fakeKubeClient, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Nodes(), @@ -196,8 +204,6 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 adc := adcObj.(*attachDetachController) - stopCh := make(chan struct{}) - pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) @@ -227,9 +233,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 csiNodeInformer.GetIndexer().Add(&csiNodeToAdd) } - informerFactory.Start(stopCh) + informerFactory.Start(ctx.Done()) - if !kcache.WaitForNamedCacheSync("attach detach", stopCh, + if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), informerFactory.Core().V1().Pods().Informer().HasSynced, informerFactory.Core().V1().Nodes().Informer().HasSynced, informerFactory.Storage().V1().CSINodes().Informer().HasSynced) { @@ -278,7 +284,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 } // Populate ASW - err = adc.populateActualStateOfWorld() + err = adc.populateActualStateOfWorld(logger) if err != nil { t.Fatalf("Run failed with error. 
Expected: Actual: <%v>", err) } @@ -295,7 +301,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 } // Populate DSW - err = adc.populateDesiredStateOfWorld() + err = adc.populateDesiredStateOfWorld(logger) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } @@ -310,9 +316,8 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2 podInformer.GetIndexer().Add(newPod) } - go adc.reconciler.Run(stopCh) - go adc.desiredStateOfWorldPopulator.Run(stopCh) - defer close(stopCh) + go adc.reconciler.Run(ctx) + go adc.desiredStateOfWorldPopulator.Run(ctx) time.Sleep(time.Second * 1) // Wait so the reconciler calls sync at least once @@ -437,7 +442,11 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { vaInformer := informerFactory.Storage().V1().VolumeAttachments().Informer() // Create the controller + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() adcObj, err := NewAttachDetachController( + logger, fakeKubeClient, informerFactory.Core().V1().Pods(), informerFactory.Core().V1().Nodes(), @@ -537,10 +546,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { } // Makesure the informer cache is synced - stopCh := make(chan struct{}) - informerFactory.Start(stopCh) + informerFactory.Start(ctx.Done()) - if !kcache.WaitForNamedCacheSync("attach detach", stopCh, + if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), informerFactory.Core().V1().Pods().Informer().HasSynced, informerFactory.Core().V1().Nodes().Informer().HasSynced, informerFactory.Core().V1().PersistentVolumes().Informer().HasSynced, @@ -549,21 +557,19 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) { } // Populate ASW - err = adc.populateActualStateOfWorld() + err = adc.populateActualStateOfWorld(logger) if err != nil { t.Fatalf("Run failed with error. 
Expected: Actual: <%v>", err) } // Populate DSW - err = adc.populateDesiredStateOfWorld() + err = adc.populateDesiredStateOfWorld(logger) if err != nil { t.Fatalf("Run failed with error. Expected: Actual: %v", err) } // Run reconciler and DSW populator loops - go adc.reconciler.Run(stopCh) - go adc.desiredStateOfWorldPopulator.Run(stopCh) - defer close(stopCh) - + go adc.reconciler.Run(ctx) + go adc.desiredStateOfWorldPopulator.Run(ctx) if tc.csiMigration { verifyExpectedVolumeState(t, adc, tc) } else { diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go index 5d580a5f557..db79351be78 100644 --- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go +++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world.go @@ -23,13 +23,12 @@ package cache import ( "fmt" + "k8s.io/klog/v2" "sync" "time" "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/klog/v2" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/volume" @@ -60,7 +59,7 @@ type ActualStateOfWorld interface { // added. // If no node with the name nodeName exists in list of attached nodes for // the specified volume, the node is added. - AddVolumeNode(uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, attached bool) (v1.UniqueVolumeName, error) + AddVolumeNode(logger klog.Logger, uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, attached bool) (v1.UniqueVolumeName, error) // SetVolumeMountedByNode sets the MountedByNode value for the given volume // and node. When set to true the mounted parameter indicates the volume @@ -72,23 +71,23 @@ type ActualStateOfWorld interface { // returned. // If no node with the name nodeName exists in list of attached nodes for // the specified volume, an error is returned. 
- SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error + SetVolumeMountedByNode(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error // SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified // node to true indicating the AttachedVolume field in the Node's Status // object needs to be updated by the node updater again. // If the specified node does not exist in the nodesToUpdateStatusFor list, // log the error and return - SetNodeStatusUpdateNeeded(nodeName types.NodeName) + SetNodeStatusUpdateNeeded(logger klog.Logger, nodeName types.NodeName) // ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach // request any more for the volume - ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) + ResetDetachRequestTime(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) // SetDetachRequestTime sets the detachRequestedTime to current time if this is no // previous request (the previous detachRequestedTime is zero) and return the time elapsed // since last request - SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) + SetDetachRequestTime(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) // DeleteVolumeNode removes the given volume and node from the underlying // store indicating the specified volume is no longer attached to the @@ -135,12 +134,12 @@ type ActualStateOfWorld interface { // this may differ from the actual list of attached volumes for the node // since volumes should be removed from this list as soon a detach operation // is considered, before the detach operation is triggered). 
- GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume + GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume // GetVolumesToReportAttachedForNode returns the list of volumes that should be reported as // attached for the given node. It reports a boolean indicating if there is an update for that // node and the corresponding attachedVolumes list. - GetVolumesToReportAttachedForNode(name types.NodeName) (bool, []v1.AttachedVolume) + GetVolumesToReportAttachedForNode(logger klog.Logger, name types.NodeName) (bool, []v1.AttachedVolume) // GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor @@ -279,15 +278,17 @@ type nodeToUpdateStatusFor struct { } func (asw *actualStateOfWorld) MarkVolumeAsUncertain( + logger klog.Logger, uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error { - _, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, "", false /* isAttached */) + _, err := asw.AddVolumeNode(logger, uniqueName, volumeSpec, nodeName, "", false /* isAttached */) return err } func (asw *actualStateOfWorld) MarkVolumeAsAttached( + logger klog.Logger, uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error { - _, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, devicePath, true) + _, err := asw.AddVolumeNode(logger, uniqueName, volumeSpec, nodeName, devicePath, true) return err } @@ -304,13 +305,15 @@ func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached( } func (asw *actualStateOfWorld) AddVolumeToReportAsAttached( + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) { asw.Lock() defer asw.Unlock() - asw.addVolumeToReportAsAttached(volumeName, nodeName) + asw.addVolumeToReportAsAttached(logger, volumeName, nodeName) } func (asw *actualStateOfWorld) AddVolumeNode( + logger klog.Logger, uniqueName 
v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, isAttached bool) (v1.UniqueVolumeName, error) { volumeName := uniqueName if volumeName == "" { @@ -354,10 +357,10 @@ func (asw *actualStateOfWorld) AddVolumeNode( // Update the fields for volume object except the nodes attached to the volumes. volumeObj.devicePath = devicePath volumeObj.spec = volumeSpec - klog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q", - volumeName, - nodeName, - devicePath) + logger.V(2).Info("Volume is already added to attachedVolume list to node, update device path", + "volumeName", volumeName, + "node", klog.KRef("", string(nodeName)), + "devicePath", devicePath) } node, nodeExists := volumeObj.nodesAttachedTo[nodeName] if !nodeExists { @@ -370,22 +373,23 @@ func (asw *actualStateOfWorld) AddVolumeNode( } } else { node.attachedConfirmed = isAttached - klog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q, the current attach state is %t", - volumeName, - nodeName, - isAttached) + logger.V(5).Info("Volume is already added to attachedVolume list to the node", + "volumeName", volumeName, + "node", klog.KRef("", string(nodeName)), + "currentAttachState", isAttached) } volumeObj.nodesAttachedTo[nodeName] = node asw.attachedVolumes[volumeName] = volumeObj if isAttached { - asw.addVolumeToReportAsAttached(volumeName, nodeName) + asw.addVolumeToReportAsAttached(logger, volumeName, nodeName) } return volumeName, nil } func (asw *actualStateOfWorld) SetVolumeMountedByNode( + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error { asw.Lock() defer asw.Unlock() @@ -397,21 +401,22 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode( nodeObj.mountedByNode = mounted volumeObj.nodesAttachedTo[nodeName] = nodeObj - klog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t", - volumeName, - nodeName, - mounted) + 
logger.V(4).Info("SetVolumeMountedByNode volume to the node", + "node", klog.KRef("", string(nodeName)), + "volumeName", volumeName, + "mounted", mounted) return nil } func (asw *actualStateOfWorld) ResetDetachRequestTime( + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) { asw.Lock() defer asw.Unlock() volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName) if err != nil { - klog.Errorf("Failed to ResetDetachRequestTime with error: %v", err) + logger.Error(err, "Failed to ResetDetachRequestTime with error") return } nodeObj.detachRequestedTime = time.Time{} @@ -419,6 +424,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime( } func (asw *actualStateOfWorld) SetDetachRequestTime( + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) { asw.Lock() defer asw.Unlock() @@ -431,9 +437,9 @@ func (asw *actualStateOfWorld) SetDetachRequestTime( if nodeObj.detachRequestedTime.IsZero() { nodeObj.detachRequestedTime = time.Now() volumeObj.nodesAttachedTo[nodeName] = nodeObj - klog.V(4).Infof("Set detach request time to current time for volume %v on node %q", - volumeName, - nodeName) + logger.V(4).Info("Set detach request time to current time for volume on node", + "node", klog.KRef("", string(nodeName)), + "volumeName", volumeName) } return time.Since(nodeObj.detachRequestedTime), nil } @@ -488,10 +494,10 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached( // Add the volumeName to the node's volumesToReportAsAttached list // This is an internal function and caller should acquire and release the lock func (asw *actualStateOfWorld) addVolumeToReportAsAttached( - volumeName v1.UniqueVolumeName, nodeName types.NodeName) { + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) { // In case the volume/node entry is no longer in attachedVolume list, skip the rest if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil { - 
klog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName) + logger.V(4).Info("Volume is no longer attached to node", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName) return } nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] @@ -503,7 +509,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached( volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName), } asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate - klog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName) + logger.V(4).Info("Add new node to nodesToUpdateStatusFor", "node", klog.KRef("", string(nodeName))) } _, nodeToUpdateVolumeExists := nodeToUpdate.volumesToReportAsAttached[volumeName] @@ -511,7 +517,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached( nodeToUpdate.statusUpdateNeeded = true nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate - klog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName) + logger.V(4).Info("Report volume as attached to node", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName) } } @@ -534,11 +540,11 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN return nil } -func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) { +func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(logger klog.Logger, nodeName types.NodeName) { asw.Lock() defer asw.Unlock() if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil { - klog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err) + logger.Info("Failed to update statusUpdateNeeded field in actual state of world", "err", err) } } @@ -584,8 +590,8 @@ func (asw *actualStateOfWorld) GetAttachState( } // SetVolumeClaimSize sets size of the volume. But this function should not be used from attach_detach controller. 
-func (asw *actualStateOfWorld) InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) { - klog.V(5).Infof("no-op InitializeClaimSize call in attach-detach controller.") +func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) { + logger.V(5).Info("no-op InitializeClaimSize call in attach-detach controller") } func (asw *actualStateOfWorld) GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity { @@ -663,7 +669,7 @@ func (asw *actualStateOfWorld) GetNodesForAttachedVolume(volumeName v1.UniqueVol return nodes } -func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume { +func (asw *actualStateOfWorld) GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume { asw.Lock() defer asw.Unlock() @@ -676,14 +682,14 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][ // of this node will be updated, so set the flag statusUpdateNeeded to false indicating // the current status is already updated. if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil { - klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err) + logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes") } } return volumesToReportAttached } -func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(nodeName types.NodeName) (bool, []v1.AttachedVolume) { +func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(logger klog.Logger, nodeName types.NodeName) (bool, []v1.AttachedVolume) { asw.Lock() defer asw.Unlock() @@ -700,7 +706,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(nodeName types. // of this node will be updated, so set the flag statusUpdateNeeded to false indicating // the current status is already updated. 
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil { - klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err) + logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes") } return true, volumesToReportAttached diff --git a/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go b/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go index 984b4d7a7b1..aec08e2a5b4 100644 --- a/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go +++ b/pkg/controller/volume/attachdetach/cache/actual_state_of_world_test.go @@ -22,6 +22,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/klog/v2/ktesting" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumeutil "k8s.io/kubernetes/pkg/volume/util" @@ -40,7 +41,8 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) { devicePath := "fake/device/path" // Act - generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) // Assert if err != nil { @@ -75,7 +77,8 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T) devicePath := "fake/device/path" // Act - generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, false) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, false) // Assert if err != nil { @@ -93,7 +96,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T) } verifyAttachedVolume(t, allVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* 
expectNonZeroDetachRequestedTime */) - reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() + reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger) _, exists := reportAsAttachedVolumesMap[nodeName] if exists { t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: Expect: Expect: Actual: <%v>", addErr) } @@ -398,11 +405,12 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) { node1Name := types.NodeName("node1-name") node2Name := types.NodeName("node2-name") devicePath := "fake/device/path" - generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true) if add1Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add1Err) } - generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true) + generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true) if add2Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add2Err) } @@ -446,7 +454,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", addErr) } @@ -479,7 +488,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) { node1Name := types.NodeName("node1-name") node2Name := types.NodeName("node2-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } @@ -550,7 +560,8 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } @@ -577,14 +588,15 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) { volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) node1Name := types.NodeName("node1-name") devicePath := "fake/device/path" - generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Name, volume1Spec, node1Name, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volume1Name, volume1Spec, node1Name, devicePath, true) if add1Err != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", add1Err) } volume2Name := v1.UniqueVolumeName("volume2-name") volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) node2Name := types.NodeName("node2-name") - generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Name, volume2Spec, node2Name, devicePath, true) + generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volume2Name, volume2Spec, node2Name, devicePath, true) if add2Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add2Err) } @@ -620,12 +632,13 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) { if err != nil || plugin == nil { t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err) } - generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath, true) if add1Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add1Err) } node2Name := types.NodeName("node2-name") - generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true) + generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true) if add2Err != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", add2Err) } @@ -659,7 +672,8 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } @@ -686,14 +700,15 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", addErr) } // Act - setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) - setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) + setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */) + setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) // Assert if setVolumeMountedErr1 != nil { @@ -722,7 +737,8 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", addErr) } @@ -735,7 +751,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) { verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */) // Act - setVolumeMountedErr := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) + setVolumeMountedErr := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) // Assert if setVolumeMountedErr != nil { @@ -762,15 +778,16 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", addErr) } // Act - setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) - setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) - generatedVolumeName, addErr = asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */) + setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) + generatedVolumeName, addErr = asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) // Assert if setVolumeMountedErr1 != nil { @@ -803,11 +820,12 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } - _, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) + _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName) if err != nil { t.Fatalf("SetDetachRequestTime failed. 
Expected: Actual: <%v>", err) } @@ -818,8 +836,8 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest expectedDetachRequestedTime := asw.GetAttachedVolumes()[0].DetachRequestedTime // Act - setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) - setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) + setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */) + setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) // Assert if setVolumeMountedErr1 != nil { @@ -850,7 +868,8 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) { devicePath := "fake/device/path" volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } @@ -877,13 +896,14 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", addErr) } // Act - _, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) + _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName) if err != nil { t.Fatalf("SetDetachRequestTime failed. Expected: Actual: <%v>", err) } @@ -913,19 +933,20 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } // Act - _, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) + _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName) if err != nil { t.Fatalf("SetDetachRequestTime failed. Expected: Actual: <%v>", err) } markDesireToDetachErr := asw.RemoveVolumeFromReportAsAttached(generatedVolumeName, nodeName) // Reset detach request time to 0 - asw.ResetDetachRequestTime(generatedVolumeName, nodeName) + asw.ResetDetachRequestTime(logger, generatedVolumeName, nodeName) // Assert if markDesireToDetachErr != nil { @@ -956,12 +977,13 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", addErr) } - setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) - setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) + setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */) + setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) if setVolumeMountedErr1 != nil { t.Fatalf("SetVolumeMountedByNode1 failed. Expected Actual: <%v>", setVolumeMountedErr1) } @@ -970,7 +992,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou } // Act - _, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) + _, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName) if err != nil { t.Fatalf("SetDetachRequestTime failed. Expected: Actual: <%v>", err) } @@ -999,7 +1021,8 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } @@ -1009,7 +1032,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) { t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: Actual: <%v>", removeVolumeDetachErr) } - reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() + reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger) volumes, exists := reportAsAttachedVolumesMap[nodeName] if !exists { t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. 
Expected: Actual: Actual: <%v>", addErr) } @@ -1042,7 +1066,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive( t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: Actual: <%v>", removeVolumeDetachErr) } - reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() + reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger) volumes, exists := reportAsAttachedVolumesMap[nodeName] if !exists { t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: Actual: Actual: <%v>", len(volumes)) } - asw.AddVolumeToReportAsAttached(generatedVolumeName, nodeName) - reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached() + asw.AddVolumeToReportAsAttached(logger, generatedVolumeName, nodeName) + reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached(logger) volumes, exists = reportAsAttachedVolumesMap[nodeName] if !exists { t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: Actual: Actual: <%v>", addErr) } @@ -1085,7 +1110,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) { t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: Actual: <%v>", removeVolumeDetachErr) } - reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached() + reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger) volumes, exists := reportAsAttachedVolumesMap[nodeName] if !exists { t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: Actual: Actual: Actual: <%v>", addErr) } maxWaitTime := 1 * time.Second - etime, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName) + etime, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName) if err != nil { t.Fatalf("SetDetachRequestTime failed. 
Expected: Actual: <%v>", err) } @@ -1135,7 +1161,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) { } // Sleep and call SetDetachRequestTime again time.Sleep(maxWaitTime) - etime, err = asw.SetDetachRequestTime(generatedVolumeName, nodeName) + etime, err = asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName) if err != nil { t.Fatalf("SetDetachRequestTime failed. Expected: Actual: <%v>", err) } @@ -1167,7 +1193,8 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) nodeName := types.NodeName("node-name") devicePath := "fake/device/path" - generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) if addErr != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", addErr) } @@ -1191,14 +1218,15 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) { volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) node1Name := types.NodeName("node1-name") devicePath := "fake/device/path" - _, add1Err := asw.AddVolumeNode(volume1Name, volume1Spec, node1Name, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + _, add1Err := asw.AddVolumeNode(logger, volume1Name, volume1Spec, node1Name, devicePath, true) if add1Err != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", add1Err) } volume2Name := v1.UniqueVolumeName("volume2-name") volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) node2Name := types.NodeName("node2-name") - generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Name, volume2Spec, node2Name, devicePath, true) + generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volume2Name, volume2Spec, node2Name, devicePath, true) if add2Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add2Err) } @@ -1222,6 +1250,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) node1Name := types.NodeName("node1-name") devicePath := "fake/device/path" + logger, _ := ktesting.NewTestContext(t) plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) if err != nil || plugin == nil { t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err) @@ -1230,12 +1259,12 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) { if err != nil || plugin == nil { t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err) } - generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath, true) + generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath, true) if add1Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add1Err) } node2Name := types.NodeName("node2-name") - generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true) + generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true) if add2Err != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", add2Err) } @@ -1266,6 +1295,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) { volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) node1Name := types.NodeName("node1-name") devicePath1 := "fake/device/path1" + logger, _ := ktesting.NewTestContext(t) plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) if err != nil || plugin == nil { t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err) @@ -1274,13 +1304,13 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) { if err != nil || plugin == nil { t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err) } - generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath1, true) + generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath1, true) if add1Err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", add1Err) } node2Name := types.NodeName("node2-name") devicePath2 := "fake/device/path2" - generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath2, true) + generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath2, true) if add2Err != nil { t.Fatalf("AddVolumeNode failed. 
Expected: Actual: <%v>", add2Err) } @@ -1313,7 +1343,8 @@ func Test_SetNodeStatusUpdateNeededError(t *testing.T) { nodeName := types.NodeName("node-1") // Act - asw.SetNodeStatusUpdateNeeded(nodeName) + logger, _ := ktesting.NewTestContext(t) + asw.SetNodeStatusUpdateNeeded(logger, nodeName) // Assert nodesToUpdateStatusFor := asw.GetNodesToUpdateStatusFor() @@ -1393,7 +1424,8 @@ func Test_MarkVolumeAsAttached(t *testing.T) { } // Act - err = asw.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, devicePath) // Assert if err != nil { @@ -1429,7 +1461,8 @@ func Test_MarkVolumeAsUncertain(t *testing.T) { } // Act - err = asw.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsUncertain(logger, volumeName, volumeSpec, nodeName) // Assert if err != nil { @@ -1464,14 +1497,15 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) { devicePath := "fake/device/path" // Act - generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true) + logger, _ := ktesting.NewTestContext(t) + generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true) // Assert if err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", err) } - needsUpdate, attachedVolumes := asw.GetVolumesToReportAttachedForNode(nodeName) + needsUpdate, attachedVolumes := asw.GetVolumesToReportAttachedForNode(logger, nodeName) if !needsUpdate { t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. 
Actual: Expect: Actual: <%v>", len(attachedVolumes)) } - needsUpdate, _ = asw.GetVolumesToReportAttachedForNode(nodeName) + needsUpdate, _ = asw.GetVolumesToReportAttachedForNode(logger, nodeName) if needsUpdate { t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: Expect: Actual: <%v>", removeVolumeDetachErr) } - needsUpdate, attachedVolumes = asw.GetVolumesToReportAttachedForNode(nodeName) + needsUpdate, attachedVolumes = asw.GetVolumesToReportAttachedForNode(logger, nodeName) if !needsUpdate { t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: Expect: Expect: rc.syncDuration { - klog.V(5).Info("Starting reconciling attached volumes still attached") + logger.V(5).Info("Starting reconciling attached volumes still attached") rc.sync() } } @@ -164,11 +165,12 @@ func (rc *reconciler) nodeIsHealthy(nodeName types.NodeName) (bool, error) { return nodeutil.IsNodeReady(node), nil } -func (rc *reconciler) reconcile() { +func (rc *reconciler) reconcile(ctx context.Context) { // Detaches are triggered before attaches so that volumes referenced by // pods that are rescheduled to a different node are detached first. // Ensure volumes that should be detached are detached. + logger := klog.FromContext(ctx) for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() { if !rc.desiredStateOfWorld.VolumeExists( attachedVolume.VolumeName, attachedVolume.NodeName) { @@ -182,12 +184,12 @@ func (rc *reconciler) reconcile() { // allows multi attach across different nodes. if util.IsMultiAttachAllowed(attachedVolume.VolumeSpec) { if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, attachedVolume.NodeName, operationexecutor.DetachOperationName) { - klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff for node %q. 
Can't start detach", attachedVolume.VolumeName, attachedVolume.NodeName) + logger.V(10).Info("Operation for volume is already running or still in exponential backoff for node. Can't start detach", "node", klog.KRef("", string(attachedVolume.NodeName)), "volumeName", attachedVolume.VolumeName) continue } } else { if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, "" /* nodeName */, operationexecutor.DetachOperationName) { - klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff in the cluster. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName) + logger.V(10).Info("Operation for volume is already running or still in exponential backoff in the cluster. Can't start detach for node", "node", klog.KRef("", string(attachedVolume.NodeName)), "volumeName", attachedVolume.VolumeName) continue } } @@ -201,14 +203,14 @@ func (rc *reconciler) reconcile() { // See https://github.com/kubernetes/kubernetes/issues/93902 attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName) if attachState == cache.AttachStateDetached { - klog.V(5).InfoS("Volume detached--skipping", "volume", attachedVolume) + logger.V(5).Info("Volume detached--skipping", "volume", attachedVolume) continue } // Set the detach request time - elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName) + elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(logger, attachedVolume.VolumeName, attachedVolume.NodeName) if err != nil { - klog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err) + logger.Error(err, "Cannot trigger detach because it fails to set detach request time with error") continue } // Check whether timeout has reached the maximum waiting time @@ -216,7 +218,7 @@ func (rc *reconciler) reconcile() { isHealthy, err := 
rc.nodeIsHealthy(attachedVolume.NodeName) if err != nil { - klog.Errorf("failed to get health of node %s: %s", attachedVolume.NodeName, err.Error()) + logger.Error(err, "Failed to get health of node", "node", klog.KRef("", string(attachedVolume.NodeName))) } // Force detach volumes from unhealthy nodes after maxWaitForUnmountDuration. @@ -224,13 +226,13 @@ func (rc *reconciler) reconcile() { hasOutOfServiceTaint, err := rc.hasOutOfServiceTaint(attachedVolume.NodeName) if err != nil { - klog.Errorf("failed to get taint specs for node %s: %s", attachedVolume.NodeName, err.Error()) + logger.Error(err, "Failed to get taint specs for node", "node", klog.KRef("", string(attachedVolume.NodeName))) } // Check whether volume is still mounted. Skip detach if it is still mounted unless force detach timeout // or the node has `node.kubernetes.io/out-of-service` taint. if attachedVolume.MountedByNode && !forceDetach && !hasOutOfServiceTaint { - klog.V(5).InfoS("Cannot detach volume because it is still mounted", "volume", attachedVolume) + logger.V(5).Info("Cannot detach volume because it is still mounted", "volume", attachedVolume) continue } @@ -240,77 +242,77 @@ func (rc *reconciler) reconcile() { // has the correct volume attachment information. err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName) if err != nil { - klog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v", - attachedVolume.VolumeName, - attachedVolume.NodeName, - err) + logger.V(5).Info("RemoveVolumeFromReportAsAttached failed while removing volume from node", + "node", klog.KRef("", string(attachedVolume.NodeName)), + "volumeName", attachedVolume.VolumeName, + "err", err) } // Update Node Status to indicate volume is no longer safe to mount. 
- err = rc.nodeStatusUpdater.UpdateNodeStatusForNode(attachedVolume.NodeName) + err = rc.nodeStatusUpdater.UpdateNodeStatusForNode(logger, attachedVolume.NodeName) if err != nil { // Skip detaching this volume if unable to update node status - klog.ErrorS(err, "UpdateNodeStatusForNode failed while attempting to report volume as attached", "volume", attachedVolume) + logger.Error(err, "UpdateNodeStatusForNode failed while attempting to report volume as attached", "volume", attachedVolume) // Add volume back to ReportAsAttached if UpdateNodeStatusForNode call failed so that node status updater will add it back to VolumeAttached list. // It is needed here too because DetachVolume is not call actually and we keep the data consistency for every reconcile. - rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName) + rc.actualStateOfWorld.AddVolumeToReportAsAttached(logger, attachedVolume.VolumeName, attachedVolume.NodeName) continue } // Trigger detach volume which requires verifying safe to detach step // If timeout is true, skip verifySafeToDetach check // If the node has node.kubernetes.io/out-of-service taint with NoExecute effect, skip verifySafeToDetach check - klog.V(5).InfoS("Starting attacherDetacher.DetachVolume", "volume", attachedVolume) + logger.V(5).Info("Starting attacherDetacher.DetachVolume", "volume", attachedVolume) if hasOutOfServiceTaint { - klog.V(4).Infof("node %q has out-of-service taint", attachedVolume.NodeName) + logger.V(4).Info("node has out-of-service taint", "node", klog.KRef("", string(attachedVolume.NodeName))) } verifySafeToDetach := !(timeout || hasOutOfServiceTaint) - err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld) + err = rc.attacherDetacher.DetachVolume(logger, attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld) if err == nil { if !timeout { - klog.InfoS("attacherDetacher.DetachVolume started", "volume", 
attachedVolume) + logger.Info("attacherDetacher.DetachVolume started", "volume", attachedVolume) } else { metrics.RecordForcedDetachMetric() - klog.InfoS("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume) + logger.Info("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume) } } if err != nil { // Add volume back to ReportAsAttached if DetachVolume call failed so that node status updater will add it back to VolumeAttached list. // This function is also called during executing the volume detach operation in operation_generoator. // It is needed here too because DetachVolume call might fail before executing the actual operation in operation_executor (e.g., cannot find volume plugin etc.) - rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName) + rc.actualStateOfWorld.AddVolumeToReportAsAttached(logger, attachedVolume.VolumeName, attachedVolume.NodeName) if !exponentialbackoff.IsExponentialBackoff(err) { // Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. 
- klog.ErrorS(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume) + logger.Error(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume) } } } } - rc.attachDesiredVolumes() + rc.attachDesiredVolumes(logger) // Update Node Status - err := rc.nodeStatusUpdater.UpdateNodeStatuses() + err := rc.nodeStatusUpdater.UpdateNodeStatuses(logger) if err != nil { - klog.Warningf("UpdateNodeStatuses failed with: %v", err) + logger.Info("UpdateNodeStatuses failed", "err", err) } } -func (rc *reconciler) attachDesiredVolumes() { +func (rc *reconciler) attachDesiredVolumes(logger klog.Logger) { // Ensure volumes that should be attached are attached. for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() { if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) { // Don't even try to start an operation if there is already one running for the given volume and node. if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) { - klog.V(10).Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName) + logger.V(10).Info("Operation for volume is already running for node. Can't start attach", "node", klog.KRef("", string(volumeToAttach.NodeName)), "volumeName", volumeToAttach.VolumeName) continue } } else { // Don't even try to start an operation if there is already one running for the given volume if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) { - klog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName) + logger.V(10).Info("Operation for volume is already running. 
Can't start attach for node", "node", klog.KRef("", string(volumeToAttach.NodeName)), "volumeName", volumeToAttach.VolumeName) continue } } @@ -323,8 +325,8 @@ func (rc *reconciler) attachDesiredVolumes() { attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName) if attachState == cache.AttachStateAttached { // Volume/Node exists, touch it to reset detachRequestedTime - klog.V(10).InfoS("Volume attached--touching", "volume", volumeToAttach) - rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName) + logger.V(10).Info("Volume attached--touching", "volume", volumeToAttach) + rc.actualStateOfWorld.ResetDetachRequestTime(logger, volumeToAttach.VolumeName, volumeToAttach.NodeName) continue } @@ -332,7 +334,7 @@ func (rc *reconciler) attachDesiredVolumes() { nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName) if len(nodes) > 0 { if !volumeToAttach.MultiAttachErrorReported { - rc.reportMultiAttachError(volumeToAttach, nodes) + rc.reportMultiAttachError(logger, volumeToAttach, nodes) rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName) } continue @@ -340,22 +342,22 @@ } // Volume/Node doesn't exist, spawn a goroutine to attach it - klog.V(5).InfoS("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach) - err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld) + logger.V(5).Info("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach) + err := rc.attacherDetacher.AttachVolume(logger, volumeToAttach.VolumeToAttach, rc.actualStateOfWorld) if err == nil { - klog.InfoS("attacherDetacher.AttachVolume started", "volume", volumeToAttach) + logger.Info("attacherDetacher.AttachVolume started", "volume", volumeToAttach) } if err != nil && !exponentialbackoff.IsExponentialBackoff(err) { // Ignore
exponentialbackoff.IsExponentialBackoff errors, they are expected. // Log all other errors. - klog.ErrorS(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach) + logger.Error(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach) } } } // reportMultiAttachError sends events and logs situation that a volume that // should be attached to a node is already attached to different node(s). -func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) { +func (rc *reconciler) reportMultiAttachError(logger klog.Logger, volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) { // Filter out the current node from list of nodes where the volume is // attached. // Some methods need []string, some other needs []NodeName, collect both. @@ -373,7 +375,6 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach // Get list of pods that use the volume on the other nodes. pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName) - if len(pods) == 0 { // We did not find any pods that requests the volume. The pod must have been deleted already. 
simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another") @@ -381,7 +382,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg) } // Log detailed message to system admin - klog.InfoS("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach) + logger.Info("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach) return } @@ -417,5 +418,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach } // Log all pods for system admin - klog.InfoS("Multi-Attach error: volume is already used by pods", "pods", klog.KObjSlice(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach) + logger.Info("Multi-Attach error: volume is already used by pods", "pods", klog.KObjSlice(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach) } diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go index 237f7b4621c..f4c3c52fb7e 100644 --- a/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go +++ b/pkg/controller/volume/attachdetach/reconciler/reconciler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package reconciler import ( + "context" "testing" "time" @@ -28,6 +29,8 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/tools/record" featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater" @@ -40,10 +43,10 @@ import ( ) const ( - reconcilerLoopPeriod time.Duration = 10 * time.Millisecond - syncLoopPeriod time.Duration = 100 * time.Minute - maxWaitForUnmountDuration time.Duration = 50 * time.Millisecond - maxLongWaitForUnmountDuration time.Duration = 4200 * time.Second + reconcilerLoopPeriod = 10 * time.Millisecond + syncLoopPeriod = 100 * time.Minute + maxWaitForUnmountDuration = 50 * time.Millisecond + maxLongWaitForUnmountDuration = 4200 * time.Second ) // Calls Run() @@ -70,9 +73,10 @@ func Test_Run_Positive_DoNothing(t *testing.T) { reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder) // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin) @@ -122,9 +126,10 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) { } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -175,9 +180,10 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := 
context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -196,8 +202,8 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te generatedVolumeName, nodeName) } - asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) - asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) + asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */) + asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) // Assert waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -250,9 +256,10 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -324,9 +331,10 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -345,8 +353,8 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate generatedVolumeName, nodeName) } - asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */) - asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */) + asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */) + asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */) // Assert verifyNewDetacherCallCount(t, true /* 
expectZeroNewDetacherCallCount */, fakePlugin) @@ -403,9 +411,10 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing. } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 2 /* expectedCallCount */, fakePlugin) @@ -497,9 +506,10 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteOnce(t *testing. } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -579,9 +589,10 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/) // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Add the pod in which the volume is attached to the uncertain node generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1) @@ -593,11 +604,11 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing // Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node. 
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw) // When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse. // Without this, the delete operation will be delayed due to mounted status - asw.SetVolumeMountedByNode(generatedVolumeName, nodeName1, false /* mounted */) + asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName1, false /* mounted */) dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) @@ -629,9 +640,12 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t * informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) nodeLister := informerFactory.Core().V1().Nodes().Lister() nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() rc := NewReconciler( reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder) - reconciliationLoopFunc := rc.(*reconciler).reconciliationLoopFunc() + reconciliationLoopFunc := rc.(*reconciler).reconciliationLoopFunc(ctx) podName1 := "pod-uid1" volumeName := v1.UniqueVolumeName("volume-name") volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) @@ -646,22 +660,22 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t * } // Act - reconciliationLoopFunc() + reconciliationLoopFunc(ctx) // Volume is added to asw, volume should be reported as attached to the node. 
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw) // Delete the pod dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) // Mock NodeStatusUpdate fail rc.(*reconciler).nodeStatusUpdater = statusupdater.NewFakeNodeStatusUpdater(true /* returnError */) - reconciliationLoopFunc() + reconciliationLoopFunc(ctx) // The first detach will be triggered after at least 50ms (maxWaitForUnmountDuration in test). time.Sleep(100 * time.Millisecond) - reconciliationLoopFunc() + reconciliationLoopFunc(ctx) // Right before detach operation is performed, the volume will be first removed from being reported // as attached on node status (RemoveVolumeFromReportAsAttached). After UpdateNodeStatus operation which is expected to fail, // controller then added the volume back as attached. @@ -669,7 +683,7 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t * // in node status. 
By calling this function (GetVolumesToReportAttached), node status should be updated, and the volume // will not need to be updated until new changes are applied (detach is triggered again) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw) } @@ -703,9 +717,10 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/) // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Add the pod in which the volume is attached to the FailDetachNode generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1) @@ -717,7 +732,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { // Volume is added to asw, volume should be reported as attached to the node. 
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw) // Delete the pod, but detach will fail dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) @@ -732,7 +747,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { // will not need to be updated until new changes are applied (detach is triggered again) time.Sleep(100 * time.Millisecond) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw) // After the first detach fails, reconciler will wait for a period of time before retrying to detach. // The wait time is increasing exponentially from initial value of 0.5s (0.5, 1, 2, 4, ...). @@ -740,14 +755,14 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { // the first detach operation. At this point, volumes status should not be updated time.Sleep(100 * time.Millisecond) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw) + verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw) // Wait for 600ms to make sure second detach operation triggered. Again, The volume will be // removed from being reported as attached on node status and then added back as attached. // The volume will be in the list of attached volumes that need to be updated to node status. 
time.Sleep(600 * time.Millisecond) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw) // Add a second pod which tries to attach the volume to the same node. // After adding pod to the same node, detach will not be triggered any more. @@ -758,7 +773,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { // Sleep 1s to verify no detach are triggered after second pod is added in the future. time.Sleep(1000 * time.Millisecond) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw) + verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw) // Add a third pod which tries to attach the volume to a different node. // At this point, volume is still attached to first node. There are no status update for both nodes. @@ -767,8 +782,8 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) { t.Fatalf("AddPod failed. 
Expected: Actual: <%v>", podAddErr) } verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw) - verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw) - verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName2, asw) + verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw) + verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName2, asw) } // Creates a volume with accessMode ReadWriteOnce @@ -805,9 +820,10 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/) // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + logger, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Add the pod in which the volume is attached to the timeout node generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1) @@ -818,11 +834,11 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T // Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node. waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw) verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateUncertain, asw) - verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, false, asw) + verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, false, asw) // When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse. 
// Without this, the delete operation will be delayed due to mounted status - asw.SetVolumeMountedByNode(generatedVolumeName, nodeName1, false /* mounted */) + asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName1, false /* mounted */) dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1) @@ -895,9 +911,10 @@ func Test_Run_OneVolumeDetachOnOutOfServiceTaintedNode(t *testing.T) { } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -973,9 +990,10 @@ func Test_Run_OneVolumeDetachOnNoOutOfServiceTaintedNode(t *testing.T) { } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -1057,9 +1075,10 @@ func Test_Run_OneVolumeDetachOnUnhealthyNode(t *testing.T) { } // Act - ch := make(chan struct{}) - go reconciler.Run(ch) - defer close(ch) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go reconciler.Run(ctx) // Assert waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin) @@ -1176,10 +1195,11 @@ func Test_ReportMultiAttachError(t *testing.T) { } } // Act + logger, _ := ktesting.NewTestContext(t) volumes := dsw.GetVolumesToAttach() for _, vol := range volumes { if vol.NodeName == "node1" { - rc.(*reconciler).reportMultiAttachError(vol, nodes) + rc.(*reconciler).reportMultiAttachError(logger, vol, nodes) } } @@ -1587,13 +1607,14 @@ func verifyVolumeAttachedToNode( func verifyVolumeReportedAsAttachedToNode( t *testing.T, + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName, isAttached bool, asw 
cache.ActualStateOfWorld, ) { result := false - volumes := asw.GetVolumesToReportAttached() + volumes := asw.GetVolumesToReportAttached(logger) for _, volume := range volumes[nodeName] { if volume.Name == volumeName { result = true @@ -1614,11 +1635,12 @@ func verifyVolumeReportedAsAttachedToNode( func verifyVolumeNoStatusUpdateNeeded( t *testing.T, + logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName, asw cache.ActualStateOfWorld, ) { - volumes := asw.GetVolumesToReportAttached() + volumes := asw.GetVolumesToReportAttached(logger) for _, volume := range volumes[nodeName] { if volume.Name == volumeName { t.Fatalf("Check volume <%v> is reported as need to update status on node <%v>, expected false", diff --git a/pkg/controller/volume/attachdetach/statusupdater/fake_node_status_updater.go b/pkg/controller/volume/attachdetach/statusupdater/fake_node_status_updater.go index a321293321b..3bd2625b890 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/fake_node_status_updater.go +++ b/pkg/controller/volume/attachdetach/statusupdater/fake_node_status_updater.go @@ -18,6 +18,8 @@ package statusupdater import ( "fmt" + "k8s.io/klog/v2" + "k8s.io/apimachinery/pkg/types" ) @@ -31,7 +33,7 @@ type fakeNodeStatusUpdater struct { returnError bool } -func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error { +func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses(logger klog.Logger) error { if fnsu.returnError { return fmt.Errorf("fake error on update node status") } @@ -39,7 +41,7 @@ func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error { return nil } -func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatusForNode(nodeName types.NodeName) error { +func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error { if fnsu.returnError { return fmt.Errorf("fake error on update node status") } diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go 
b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go index 8fd5f87d004..58a0c243f7c 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go +++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater.go @@ -20,14 +20,13 @@ package statusupdater import ( "fmt" - "k8s.io/klog/v2" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" nodeutil "k8s.io/component-helpers/node/util" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" ) @@ -36,9 +35,9 @@ import ( type NodeStatusUpdater interface { // Gets a list of node statuses that should be updated from the actual state // of the world and updates them. - UpdateNodeStatuses() error + UpdateNodeStatuses(logger klog.Logger) error // Update any pending status change for the given node - UpdateNodeStatusForNode(nodeName types.NodeName) error + UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error } // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater. 
@@ -59,13 +58,13 @@ type nodeStatusUpdater struct { actualStateOfWorld cache.ActualStateOfWorld } -func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { +func (nsu *nodeStatusUpdater) UpdateNodeStatuses(logger klog.Logger) error { var nodeIssues int // TODO: investigate right behavior if nodeName is empty // kubernetes/kubernetes/issues/37777 - nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached() + nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached(logger) for nodeName, attachedVolumes := range nodesToUpdate { - err := nsu.processNodeVolumes(nodeName, attachedVolumes) + err := nsu.processNodeVolumes(logger, nodeName, attachedVolumes) if err != nil { nodeIssues += 1 } @@ -76,56 +75,50 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error { return nil } -func (nsu *nodeStatusUpdater) UpdateNodeStatusForNode(nodeName types.NodeName) error { - needsUpdate, attachedVolumes := nsu.actualStateOfWorld.GetVolumesToReportAttachedForNode(nodeName) +func (nsu *nodeStatusUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error { + needsUpdate, attachedVolumes := nsu.actualStateOfWorld.GetVolumesToReportAttachedForNode(logger, nodeName) if !needsUpdate { return nil } - return nsu.processNodeVolumes(nodeName, attachedVolumes) + return nsu.processNodeVolumes(logger, nodeName, attachedVolumes) } -func (nsu *nodeStatusUpdater) processNodeVolumes(nodeName types.NodeName, attachedVolumes []v1.AttachedVolume) error { +func (nsu *nodeStatusUpdater) processNodeVolumes(logger klog.Logger, nodeName types.NodeName, attachedVolumes []v1.AttachedVolume) error { nodeObj, err := nsu.nodeLister.Get(string(nodeName)) if errors.IsNotFound(err) { // If node does not exist, its status cannot be updated. // Do nothing so that there is no retry until node is created. - klog.V(2).Infof( - "Could not update node status. Failed to find node %q in NodeInformer cache. 
Error: '%v'", - nodeName, - err) + logger.V(2).Info( + "Could not update node status. Failed to find node in NodeInformer cache", "node", klog.KRef("", string(nodeName)), "err", err) return nil } else if err != nil { // For all other errors, log error and reset flag statusUpdateNeeded // back to true to indicate this node status needs to be updated again. - klog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err) - nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) + logger.V(2).Info("Error retrieving nodes from node lister", "err", err) + nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName) return err } - err = nsu.updateNodeStatus(nodeName, nodeObj, attachedVolumes) + err = nsu.updateNodeStatus(logger, nodeName, nodeObj, attachedVolumes) if errors.IsNotFound(err) { // If node does not exist, its status cannot be updated. // Do nothing so that there is no retry until node is created. - klog.V(2).Infof( - "Could not update node status for %q; node does not exist - skipping", - nodeName) + logger.V(2).Info( + "Could not update node status, node does not exist - skipping", "node", klog.KObj(nodeObj)) return nil } else if err != nil { // If update node status fails, reset flag statusUpdateNeeded back to true // to indicate this node status needs to be updated again - nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName) + nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName) - klog.V(2).Infof( - "Could not update node status for %q; re-marking for update. 
%v", - nodeName, - err) + logger.V(2).Info("Could not update node status; re-marking for update", "node", klog.KObj(nodeObj), "err", err) return err } return nil } -func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error { +func (nsu *nodeStatusUpdater) updateNodeStatus(logger klog.Logger, nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error { node := nodeObj.DeepCopy() node.Status.VolumesAttached = attachedVolumes _, patchBytes, err := nodeutil.PatchNodeStatus(nsu.kubeClient.CoreV1(), nodeName, nodeObj, node) @@ -133,6 +126,6 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj return err } - klog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes) + logger.V(4).Info("Updating status for node succeeded", "node", klog.KObj(node), "patchBytes", patchBytes, "attachedVolumes", attachedVolumes) return nil } diff --git a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater_test.go b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater_test.go index c51067b7c69..87ab8941872 100644 --- a/pkg/controller/volume/attachdetach/statusupdater/node_status_updater_test.go +++ b/pkg/controller/volume/attachdetach/statusupdater/node_status_updater_test.go @@ -20,6 +20,7 @@ import ( "context" "errors" "fmt" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" @@ -37,7 +39,7 @@ import ( // setupNodeStatusUpdate creates all the needed objects for testing. 
// the initial environment has 2 nodes with no volumes attached // and adds one volume to attach to each node to the actual state of the world -func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualStateOfWorld, *fake.Clientset, NodeStatusUpdater) { +func setupNodeStatusUpdate(logger klog.Logger, t *testing.T) (cache.ActualStateOfWorld, *fake.Clientset, NodeStatusUpdater) { testNode1 := corev1.Node{ TypeMeta: metav1.TypeMeta{ Kind: "Node", @@ -83,11 +85,11 @@ func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualState nodeName2 := types.NodeName("testnode-2") devicePath := "fake/device/path" - _, err = asw.AddVolumeNode(volumeName1, volumeSpec1, nodeName1, devicePath, true) + _, err = asw.AddVolumeNode(logger, volumeName1, volumeSpec1, nodeName1, devicePath, true) if err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", err) } - _, err = asw.AddVolumeNode(volumeName2, volumeSpec2, nodeName2, devicePath, true) + _, err = asw.AddVolumeNode(logger, volumeName2, volumeSpec2, nodeName2, devicePath, true) if err != nil { t.Fatalf("AddVolumeNode failed. Expected: Actual: <%v>", err) } @@ -101,14 +103,15 @@ func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualState // checks that each node status.volumesAttached is of length 1 and contains the correct volume func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) { ctx := context.Background() - asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t) + logger := klog.FromContext(ctx) + asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t) - err := nsu.UpdateNodeStatuses() + err := nsu.UpdateNodeStatuses(logger) if err != nil { t.Fatalf("UpdateNodeStatuses failed. 
Expected: Actual: <%v>", err) } - needToReport := asw.GetVolumesToReportAttached() + needToReport := asw.GetVolumesToReportAttached(logger) if len(needToReport) != 0 { t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <0> Actual: <%v>", len(needToReport)) } @@ -138,7 +141,8 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) { func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T) { ctx := context.Background() - asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t) + logger := klog.FromContext(ctx) + asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t) var failedNode string failedOnce := false @@ -153,12 +157,12 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T) return false, nil, nil }) - err := nsu.UpdateNodeStatuses() + err := nsu.UpdateNodeStatuses(logger) if errors.Is(err, failureErr) { t.Fatalf("UpdateNodeStatuses failed. Expected: Actual: <%v>", err) } - needToReport := asw.GetVolumesToReportAttached() + needToReport := asw.GetVolumesToReportAttached(logger) if len(needToReport) != 1 { t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport)) } @@ -194,14 +198,15 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T) // checks that testnode-1 status.volumesAttached is of length 1 and contains the correct volume func TestNodeStatusUpdater_UpdateNodeStatusForNode(t *testing.T) { ctx := context.Background() - asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t) + logger := klog.FromContext(ctx) + asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t) - err := nsu.UpdateNodeStatusForNode("testnode-1") + err := nsu.UpdateNodeStatusForNode(logger, "testnode-1") if err != nil { t.Fatalf("UpdateNodeStatuses failed. 
Expected: Actual: <%v>", err) } - needToReport := asw.GetVolumesToReportAttached() + needToReport := asw.GetVolumesToReportAttached(logger) if len(needToReport) != 1 { t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport)) } diff --git a/pkg/controller/volume/attachdetach/testing/testvolumespec.go b/pkg/controller/volume/attachdetach/testing/testvolumespec.go index 8cee67a64cb..58009f5d6fc 100644 --- a/pkg/controller/volume/attachdetach/testing/testvolumespec.go +++ b/pkg/controller/volume/attachdetach/testing/testvolumespec.go @@ -376,7 +376,7 @@ func (plugin *TestPlugin) GetVolumeName(spec *volume.Spec) (string, error) { plugin.pluginLock.Lock() defer plugin.pluginLock.Unlock() if spec == nil { - klog.Errorf("GetVolumeName called with nil volume spec") + klog.ErrorS(nil, "GetVolumeName called with nil volume spec") plugin.ErrorEncountered = true return "", fmt.Errorf("GetVolumeName called with nil volume spec") } @@ -400,7 +400,7 @@ func (plugin *TestPlugin) CanSupport(spec *volume.Spec) bool { plugin.pluginLock.Lock() defer plugin.pluginLock.Unlock() if spec == nil { - klog.Errorf("CanSupport called with nil volume spec") + klog.ErrorS(nil, "CanSupport called with nil volume spec") plugin.ErrorEncountered = true } return true @@ -414,7 +414,7 @@ func (plugin *TestPlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts vol plugin.pluginLock.Lock() defer plugin.pluginLock.Unlock() if spec == nil { - klog.Errorf("NewMounter called with nil volume spec") + klog.ErrorS(nil, "NewMounter called with nil volume spec") plugin.ErrorEncountered = true } return nil, nil @@ -540,7 +540,7 @@ func (attacher *testPluginAttacher) Attach(spec *volume.Spec, nodeName types.Nod defer attacher.pluginLock.Unlock() if spec == nil { *attacher.ErrorEncountered = true - klog.Errorf("Attach called with nil volume spec") + klog.ErrorS(nil, "Attach called with nil volume spec") return "", fmt.Errorf("Attach called with nil volume spec") } 
attacher.attachedVolumeMap[string(nodeName)] = append(attacher.attachedVolumeMap[string(nodeName)], spec.Name()) @@ -556,7 +556,7 @@ func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath defer attacher.pluginLock.Unlock() if spec == nil { *attacher.ErrorEncountered = true - klog.Errorf("WaitForAttach called with nil volume spec") + klog.ErrorS(nil, "WaitForAttach called with nil volume spec") return "", fmt.Errorf("WaitForAttach called with nil volume spec") } fakePath := fmt.Sprintf("%s/%s", devicePath, spec.Name()) @@ -568,7 +568,7 @@ func (attacher *testPluginAttacher) GetDeviceMountPath(spec *volume.Spec) (strin defer attacher.pluginLock.Unlock() if spec == nil { *attacher.ErrorEncountered = true - klog.Errorf("GetDeviceMountPath called with nil volume spec") + klog.ErrorS(nil, "GetDeviceMountPath called with nil volume spec") return "", fmt.Errorf("GetDeviceMountPath called with nil volume spec") } return "", nil @@ -579,7 +579,7 @@ func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath st defer attacher.pluginLock.Unlock() if spec == nil { *attacher.ErrorEncountered = true - klog.Errorf("MountDevice called with nil volume spec") + klog.ErrorS(nil, "MountDevice called with nil volume spec") return fmt.Errorf("MountDevice called with nil volume spec") } return nil diff --git a/pkg/controller/volume/attachdetach/util/util.go b/pkg/controller/volume/attachdetach/util/util.go index 73c858a4455..32fdb1e8238 100644 --- a/pkg/controller/volume/attachdetach/util/util.go +++ b/pkg/controller/volume/attachdetach/util/util.go @@ -38,7 +38,7 @@ import ( // A volume.Spec that refers to an in-tree plugin spec is translated to refer // to a migrated CSI plugin spec if all conditions for CSI migration on a node // for the in-tree plugin is satisfied. 
-func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) { +func CreateVolumeSpec(logger klog.Logger, podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) { claimName := "" readOnly := false if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { @@ -50,10 +50,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, claimName = ephemeral.VolumeClaimName(pod, &podVolume) } if claimName != "" { - klog.V(10).Infof( - "Found PVC, ClaimName: %q/%q", - pod.Namespace, - claimName) + logger.V(10).Info("Found PVC", "PVC", klog.KRef(pod.Namespace, claimName)) // If podVolume is a PVC, fetch the real PV behind the claim pvc, err := getPVCFromCache(pod.Namespace, claimName, pvcLister) @@ -71,12 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, } pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID - klog.V(10).Infof( - "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q", - pod.Namespace, - claimName, - pvcUID, - pvName) + logger.V(10).Info("Found bound PV for PVC", "PVC", klog.KRef(pod.Namespace, claimName), "pvcUID", pvcUID, "PV", klog.KRef("", pvName)) // Fetch actual PV object volumeSpec, err := getPVSpecFromCache( @@ -98,13 +90,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, err) } - klog.V(10).Infof( - "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)", - volumeSpec.Name(), - 
pvName, - pod.Namespace, - claimName, - pvcUID) + logger.V(10).Info("Extracted volumeSpec from bound PV and PVC", "PVC", klog.KRef(pod.Namespace, claimName), "pvcUID", pvcUID, "PV", klog.KRef("", pvName), "volumeSpecName", volumeSpec.Name()) return volumeSpec, nil } @@ -199,59 +185,39 @@ func DetermineVolumeAction(pod *v1.Pod, desiredStateOfWorld cache.DesiredStateOf // ProcessPodVolumes processes the volumes in the given pod and adds them to the // desired state of the world if addVolumes is true, otherwise it removes them. -func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.DesiredStateOfWorld, volumePluginMgr *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) { +func ProcessPodVolumes(logger klog.Logger, pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.DesiredStateOfWorld, volumePluginMgr *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) { if pod == nil { return } - if len(pod.Spec.Volumes) <= 0 { - klog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.", - pod.Namespace, - pod.Name) + logger.V(10).Info("Skipping processing of pod, it has no volumes", "pod", klog.KObj(pod)) return } nodeName := types.NodeName(pod.Spec.NodeName) if nodeName == "" { - klog.V(10).Infof( - "Skipping processing of pod %q/%q: it is not scheduled to a node.", - pod.Namespace, - pod.Name) + logger.V(10).Info("Skipping processing of pod, it is not scheduled to a node", "pod", klog.KObj(pod)) return } else if !desiredStateOfWorld.NodeExists(nodeName) { // If the node the pod is scheduled to does not exist in the desired // state of the world data structure, that indicates the node is not // yet 
managed by the controller. Therefore, ignore the pod. - klog.V(4).Infof( - "Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.", - pod.Namespace, - pod.Name, - nodeName) + logger.V(4).Info("Skipping processing of pod, it is scheduled to node which is not managed by the controller", "node", klog.KRef("", string(nodeName)), "pod", klog.KObj(pod)) return } // Process volume spec for each volume defined in pod for _, podVolume := range pod.Spec.Volumes { - volumeSpec, err := CreateVolumeSpec(podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator) + volumeSpec, err := CreateVolumeSpec(logger, podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator) if err != nil { - klog.V(10).Infof( - "Error processing volume %q for pod %q/%q: %v", - podVolume.Name, - pod.Namespace, - pod.Name, - err) + logger.V(10).Info("Error processing volume for pod", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err) continue } attachableVolumePlugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec) if err != nil || attachableVolumePlugin == nil { - klog.V(10).Infof( - "Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v", - podVolume.Name, - pod.Namespace, - pod.Name, - err) + logger.V(10).Info("Skipping volume for pod, it does not implement attacher interface", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err) continue } @@ -261,12 +227,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D _, err := desiredStateOfWorld.AddPod( uniquePodName, pod, volumeSpec, nodeName) if err != nil { - klog.V(10).Infof( - "Failed to add volume %q for pod %q/%q to desiredStateOfWorld. 
%v", - podVolume.Name, - pod.Namespace, - pod.Name, - err) + logger.V(10).Info("Failed to add volume for pod to desiredStateOfWorld", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err) } } else { @@ -274,12 +235,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec( attachableVolumePlugin, volumeSpec) if err != nil { - klog.V(10).Infof( - "Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v", - podVolume.Name, - pod.Namespace, - pod.Name, - err) + logger.V(10).Info("Failed to delete volume for pod from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err) continue } desiredStateOfWorld.DeletePod( diff --git a/pkg/controller/volume/attachdetach/util/util_test.go b/pkg/controller/volume/attachdetach/util/util_test.go index a6319ff49f4..b8f99a70bc3 100644 --- a/pkg/controller/volume/attachdetach/util/util_test.go +++ b/pkg/controller/volume/attachdetach/util/util_test.go @@ -30,6 +30,7 @@ import ( kubetypes "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/informers" csitrans "k8s.io/csi-translation-lib" + "k8s.io/klog/v2/ktesting" fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake" "k8s.io/kubernetes/pkg/volume/csimigration" "k8s.io/kubernetes/pkg/volume/fc" @@ -241,8 +242,9 @@ func Test_CreateVolumeSpec(t *testing.T) { }, } { t.Run(test.desc, func(t *testing.T) { + logger, _ := ktesting.NewTestContext(t) plugMgr, intreeToCSITranslator, csiTranslator, pvLister, pvcLister := setup(testNodeName, t) - actualSpec, err := CreateVolumeSpec(test.pod.Spec.Volumes[0], test.pod, test.createNodeName, plugMgr, pvcLister, pvLister, intreeToCSITranslator, csiTranslator) + actualSpec, err := CreateVolumeSpec(logger, test.pod.Spec.Volumes[0], test.pod, test.createNodeName, plugMgr, pvcLister, pvLister, intreeToCSITranslator, csiTranslator) if 
actualSpec == nil && (test.wantPersistentVolume != nil || test.wantVolume != nil) { t.Errorf("got volume spec is nil") diff --git a/pkg/controller/volume/ephemeral/controller.go b/pkg/controller/volume/ephemeral/controller.go index 6b5f27a9ad6..55c99dd0a10 100644 --- a/pkg/controller/volume/ephemeral/controller.go +++ b/pkg/controller/volume/ephemeral/controller.go @@ -165,9 +165,9 @@ func (ec *ephemeralController) onPVCDelete(obj interface{}) { func (ec *ephemeralController) Run(ctx context.Context, workers int) { defer runtime.HandleCrash() defer ec.queue.ShutDown() - - klog.Infof("Starting ephemeral volume controller") - defer klog.Infof("Shutting down ephemeral volume controller") + logger := klog.FromContext(ctx) + logger.Info("Starting ephemeral volume controller") + defer logger.Info("Shutting down ephemeral volume controller") if !cache.WaitForNamedCacheSync("ephemeral", ctx.Done(), ec.podSynced, ec.pvcsSynced) { return @@ -212,18 +212,19 @@ func (ec *ephemeralController) syncHandler(ctx context.Context, key string) erro return err } pod, err := ec.podLister.Pods(namespace).Get(name) + logger := klog.FromContext(ctx) if err != nil { if errors.IsNotFound(err) { - klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is gone", key) + logger.V(5).Info("Ephemeral: nothing to do for pod, it is gone", "podKey", key) return nil } - klog.V(5).Infof("Error getting pod %s/%s (uid: %q) from informer : %v", pod.Namespace, pod.Name, pod.UID, err) + logger.V(5).Info("Error getting pod from informer", "pod", klog.KObj(pod), "podUID", pod.UID, "err", err) return err } // Ignore pods which are already getting deleted. 
if pod.DeletionTimestamp != nil { - klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is marked for deletion", key) + logger.V(5).Info("Ephemeral: nothing to do for pod, it is marked for deletion", "podKey", key) return nil } @@ -239,7 +240,8 @@ func (ec *ephemeralController) syncHandler(ctx context.Context, key string) erro // handleEphemeralVolume is invoked for each volume of a pod. func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vol v1.Volume) error { - klog.V(5).Infof("ephemeral: checking volume %s", vol.Name) + logger := klog.FromContext(ctx) + logger.V(5).Info("Ephemeral: checking volume", "volumeName", vol.Name) if vol.Ephemeral == nil { return nil } @@ -254,7 +256,7 @@ func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vo return err } // Already created, nothing more to do. - klog.V(5).Infof("ephemeral: volume %s: PVC %s already created", vol.Name, pvcName) + logger.V(5).Info("Ephemeral: PVC already created", "volumeName", vol.Name, "PVC", klog.KObj(pvc)) return nil } diff --git a/pkg/controller/volume/expand/expand_controller.go b/pkg/controller/volume/expand/expand_controller.go index 31a01ae56c4..7d7e62d2c12 100644 --- a/pkg/controller/volume/expand/expand_controller.go +++ b/pkg/controller/volume/expand/expand_controller.go @@ -219,20 +219,21 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error if errors.IsNotFound(err) { return nil } + logger := klog.FromContext(ctx) if err != nil { - klog.V(5).Infof("Error getting PVC %q from informer : %v", key, err) + logger.V(5).Info("Error getting PVC from informer", "pvcKey", key, "err", err) return err } pv, err := expc.getPersistentVolume(ctx, pvc) if err != nil { - klog.V(5).Infof("Error getting Persistent Volume for PVC %q (uid: %q) from informer : %v", key, pvc.UID, err) + logger.V(5).Info("Error getting Persistent Volume for PVC from informer", "pvcKey", key, "pvcUID", pvc.UID, "err", err) return err } if 
pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.UID != pv.Spec.ClaimRef.UID { err := fmt.Errorf("persistent Volume is not bound to PVC being updated : %s", key) - klog.V(4).Infof("%v", err) + logger.V(4).Info("", "err", err) return err } @@ -249,14 +250,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error volumeSpec := volume.NewSpecFromPersistentVolume(pv, false) migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec) if err != nil { - klog.V(4).Infof("failed to check CSI migration status for PVC: %s with error: %v", key, err) + logger.V(4).Info("Failed to check CSI migration status for PVC with error", "pvcKey", key, "err", err) return nil } // handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree if migratable { inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume) if err != nil { - klog.V(4).Infof("Error getting in-tree plugin name from persistent volume %s: %v", volumeSpec.PersistentVolume.Name, err) + logger.V(4).Info("Error getting in-tree plugin name from persistent volume", "volumeName", volumeSpec.PersistentVolume.Name, "err", err) return err } @@ -286,46 +287,45 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error eventType = v1.EventTypeWarning } expc.recorder.Event(pvc, eventType, events.ExternalExpanding, msg) - klog.Infof("waiting for an external controller to expand the PVC %q (uid: %q)", key, pvc.UID) + logger.Info("Waiting for an external controller to expand the PVC", "pvcKey", key, "pvcUID", pvc.UID) // If we are expecting that an external plugin will handle resizing this volume then // is no point in requeuing this PVC. 
return nil } volumeResizerName := volumePlugin.GetPluginName() - return expc.expand(pvc, pv, volumeResizerName) + return expc.expand(logger, pvc, pv, volumeResizerName) } -func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, resizerName string) error { +func (expc *expandController) expand(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, resizerName string) error { // if node expand is complete and pv's annotation can be removed, remove the annotation from pv and return - if expc.isNodeExpandComplete(pvc, pv) && metav1.HasAnnotation(pv.ObjectMeta, util.AnnPreResizeCapacity) { + if expc.isNodeExpandComplete(logger, pvc, pv) && metav1.HasAnnotation(pv.ObjectMeta, util.AnnPreResizeCapacity) { return util.DeleteAnnPreResizeCapacity(pv, expc.GetKubeClient()) } var generatedOptions volumetypes.GeneratedOperations var err error - if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) { generatedOptions, err = expc.operationGenerator.GenerateExpandAndRecoverVolumeFunc(pvc, pv, resizerName) if err != nil { - klog.Errorf("Error starting ExpandVolume for pvc %s with %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err) + logger.Error(err, "Error starting ExpandVolume for pvc", "PVC", klog.KObj(pvc)) return err } } else { pvc, err := util.MarkResizeInProgressWithResizer(pvc, resizerName, expc.kubeClient) if err != nil { - klog.Errorf("Error setting PVC %s in progress with error : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err) + logger.Error(err, "Error setting PVC in progress with error", "PVC", klog.KObj(pvc), "err", err) return err } generatedOptions, err = expc.operationGenerator.GenerateExpandVolumeFunc(pvc, pv) if err != nil { - klog.Errorf("Error starting ExpandVolume for pvc %s with %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err) + logger.Error(err, "Error starting ExpandVolume for pvc with error", "PVC", klog.KObj(pvc), "err", err) return err } } - 
klog.V(5).Infof("Starting ExpandVolume for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc)) + logger.V(5).Info("Starting ExpandVolume for volume", "volumeName", util.GetPersistentVolumeClaimQualifiedName(pvc)) _, detailedErr := generatedOptions.Run() return detailedErr @@ -335,9 +335,9 @@ func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.Persi func (expc *expandController) Run(ctx context.Context) { defer runtime.HandleCrash() defer expc.queue.ShutDown() - - klog.Infof("Starting expand controller") - defer klog.Infof("Shutting down expand controller") + logger := klog.FromContext(ctx) + logger.Info("Starting expand controller") + defer logger.Info("Shutting down expand controller") if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced, expc.pvSynced) { return @@ -367,8 +367,8 @@ func (expc *expandController) getPersistentVolume(ctx context.Context, pvc *v1.P } // isNodeExpandComplete returns true if pvc.Status.Capacity >= pv.Spec.Capacity -func (expc *expandController) isNodeExpandComplete(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool { - klog.V(4).Infof("pv %q capacity = %v, pvc %s capacity = %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage], pvc.ObjectMeta.Name, pvc.Status.Capacity[v1.ResourceStorage]) +func (expc *expandController) isNodeExpandComplete(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool { + logger.V(4).Info("pv and pvc capacity", "PV", klog.KObj(pv), "pvCapacity", pv.Spec.Capacity[v1.ResourceStorage], "PVC", klog.KObj(pvc), "pvcCapacity", pvc.Status.Capacity[v1.ResourceStorage]) pvcSpecCap := pvc.Spec.Resources.Requests.Storage() pvcStatusCap, pvCap := pvc.Status.Capacity[v1.ResourceStorage], pv.Spec.Capacity[v1.ResourceStorage] @@ -469,7 +469,7 @@ func (expc *expandController) GetServiceAccountTokenFunc() func(_, _ string, _ * func (expc *expandController) DeleteServiceAccountTokenFunc() func(types.UID) { return func(types.UID) { - 
klog.Errorf("DeleteServiceAccountToken unsupported in expandController") + klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in expandController") } } diff --git a/pkg/controller/volume/persistentvolume/binder_test.go b/pkg/controller/volume/persistentvolume/binder_test.go index 67a70ee8cba..0891d6e6439 100644 --- a/pkg/controller/volume/persistentvolume/binder_test.go +++ b/pkg/controller/volume/persistentvolume/binder_test.go @@ -23,6 +23,7 @@ import ( storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/component-helpers/storage/volume" + "k8s.io/klog/v2/ktesting" ) // Test single call to syncClaim and syncVolume methods. @@ -749,8 +750,8 @@ func TestSync(t *testing.T) { test: testSyncClaim, }, } - - runSyncTests(t, tests, []*storage.StorageClass{ + _, ctx := ktesting.NewTestContext(t) + runSyncTests(t, ctx, tests, []*storage.StorageClass{ { ObjectMeta: metav1.ObjectMeta{Name: classWait}, VolumeBindingMode: &modeWait, @@ -964,8 +965,8 @@ func TestSyncBlockVolume(t *testing.T) { test: testSyncVolume, }, } - - runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{}) + _, ctx := ktesting.NewTestContext(t) + runSyncTests(t, ctx, tests, []*storage.StorageClass{}, []*v1.Pod{}) } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -1016,6 +1017,6 @@ func TestMultiSync(t *testing.T) { test: testSyncClaim, }, } - - runMultisyncTests(t, tests, []*storage.StorageClass{}, "") + _, ctx := ktesting.NewTestContext(t) + runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "") } diff --git a/pkg/controller/volume/persistentvolume/delete_test.go b/pkg/controller/volume/persistentvolume/delete_test.go index a07bc148bc5..3fc269cb63c 100644 --- a/pkg/controller/volume/persistentvolume/delete_test.go +++ b/pkg/controller/volume/persistentvolume/delete_test.go @@ -18,15 +18,16 @@ package persistentvolume import ( "errors" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting 
"k8s.io/component-base/featuregate/testing" - "k8s.io/kubernetes/pkg/features" "testing" v1 "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/component-helpers/storage/volume" + "k8s.io/klog/v2/ktesting" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" + "k8s.io/kubernetes/pkg/features" ) // Test single call to syncVolume, expecting recycling to happen. @@ -37,6 +38,7 @@ func TestDeleteSync(t *testing.T) { const gceDriver = "pd.csi.storage.gke.io" // Default enable the HonorPVReclaimPolicy feature gate. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)() + _, ctx := ktesting.NewTestContext(t) tests := []controllerTest{ { // delete volume bound by controller @@ -106,7 +108,7 @@ func TestDeleteSync(t *testing.T) { expectedClaims: noclaims, expectedEvents: noevents, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Delete the volume before delete operation starts reactor.DeleteVolume("volume8-6") }), @@ -122,7 +124,7 @@ func TestDeleteSync(t *testing.T) { expectedClaims: newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil), expectedEvents: noevents, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl 
*PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Bind the volume to resurrected claim (this should never // happen) claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil) @@ -217,7 +219,7 @@ func TestDeleteSync(t *testing.T) { test: wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume), }, } - runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{}) + runSyncTests(t, ctx, tests, []*storage.StorageClass{}, []*v1.Pod{}) } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -250,6 +252,6 @@ func TestDeleteMultiSync(t *testing.T) { test: wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume), }, } - - runMultisyncTests(t, tests, []*storage.StorageClass{}, "") + _, ctx := ktesting.NewTestContext(t) + runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "") } diff --git a/pkg/controller/volume/persistentvolume/framework_test.go b/pkg/controller/volume/persistentvolume/framework_test.go index e5a3130084a..b50adabb96f 100644 --- a/pkg/controller/volume/persistentvolume/framework_test.go +++ b/pkg/controller/volume/persistentvolume/framework_test.go @@ -117,9 +117,9 @@ type volumeReactor struct { ctrl *PersistentVolumeController } -func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []pvtesting.ReactorError) *volumeReactor { +func newVolumeReactor(ctx context.Context, client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []pvtesting.ReactorError) *volumeReactor { return &volumeReactor{ - pvtesting.NewVolumeReactor(client, fakeVolumeWatch, fakeClaimWatch, errors), + pvtesting.NewVolumeReactor(ctx, client, fakeVolumeWatch, fakeClaimWatch, errors), ctrl, } } @@ -170,14 +170,14 @@ func (r *volumeReactor) waitTest(test controllerTest) error { // checkEvents compares all expectedEvents 
with events generated during the test // and reports differences. -func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error { +func checkEvents(t *testing.T, ctx context.Context, expectedEvents []string, ctrl *PersistentVolumeController) error { var err error // Read recorded events - wait up to 1 minute to get all the expected ones // (just in case some goroutines are slower with writing) timer := time.NewTimer(time.Minute) defer timer.Stop() - + logger := klog.FromContext(ctx) fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder) gotEvents := []string{} finished := false @@ -185,14 +185,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo select { case event, ok := <-fakeRecorder.Events: if ok { - klog.V(5).Infof("event recorder got event %s", event) + logger.V(5).Info("Event recorder got event", "event", event) gotEvents = append(gotEvents, event) } else { - klog.V(5).Infof("event recorder finished") + logger.V(5).Info("Event recorder finished") finished = true } case _, _ = <-timer.C: - klog.V(5).Infof("event recorder timeout") + logger.V(5).Info("Event recorder timeout") finished = true } } @@ -219,7 +219,7 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo func alwaysReady() bool { return true } -func newTestController(kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) (*PersistentVolumeController, error) { +func newTestController(ctx context.Context, kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) (*PersistentVolumeController, error) { if informerFactory == nil { informerFactory = informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc()) } @@ -235,7 +235,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers EventRecorder: record.NewFakeRecorder(1000), 
EnableDynamicProvisioning: enableDynamicProvisioning, } - ctrl, err := NewController(params) + ctrl, err := NewController(ctx, params) if err != nil { return nil, fmt.Errorf("failed to construct persistentvolume controller: %v", err) } @@ -586,18 +586,18 @@ const operationDelete = "Delete" const operationRecycle = "Recycle" var ( - classGold string = "gold" - classSilver string = "silver" - classCopper string = "copper" - classEmpty string = "" - classNonExisting string = "non-existing" - classExternal string = "external" - classExternalWait string = "external-wait" - classUnknownInternal string = "unknown-internal" - classUnsupportedMountOptions string = "unsupported-mountoptions" - classLarge string = "large" - classWait string = "wait" - classCSI string = "csi" + classGold = "gold" + classSilver = "silver" + classCopper = "copper" + classEmpty = "" + classNonExisting = "non-existing" + classExternal = "external" + classExternalWait = "external-wait" + classUnknownInternal = "unknown-internal" + classUnsupportedMountOptions = "unsupported-mountoptions" + classLarge = "large" + classWait = "wait" + classCSI = "csi" modeWait = storage.VolumeBindingWaitForFirstConsumer ) @@ -670,13 +670,13 @@ func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall { // injected function to simulate that something is happening when the // controller waits for the operation lock. Controller is then resumed and we // check how it behaves. 
-func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall { +func wrapTestWithInjectedOperation(ctx context.Context, toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall { return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error { // Inject a hook before async operation starts ctrl.preOperationHook = func(operationName string) { // Inside the hook, run the function to inject - klog.V(4).Infof("reactor: scheduleOperation reached, injecting call") + klog.FromContext(ctx).V(4).Info("Reactor: scheduleOperation reached, injecting call") injectBeforeOperation(ctrl, reactor) } @@ -700,7 +700,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c } } -func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest, t *testing.T) { +func evaluateTestResults(ctx context.Context, ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest, t *testing.T) { // Evaluate results if err := reactor.CheckClaims(test.expectedClaims); err != nil { t.Errorf("Test %q: %v", test.name, err) @@ -710,7 +710,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo t.Errorf("Test %q: %v", test.name, err) } - if err := checkEvents(t, test.expectedEvents, ctrl); err != nil { + if err := checkEvents(t, ctx, test.expectedEvents, ctrl); err != nil { t.Errorf("Test %q: %v", test.name, err) } } @@ -721,15 +721,15 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo // 2. Call the tested function (syncClaim/syncVolume) via // controllerTest.testCall *once*. // 3. Compare resulting volumes and claims with expected volumes and claims. 
-func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) { +func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) { doit := func(t *testing.T, test controllerTest) { // Initialize the controller client := &fake.Clientset{} - ctrl, err := newTestController(client, nil, true) + ctrl, err := newTestController(ctx, client, nil, true) if err != nil { t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err) } - reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) + reactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { if metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) { continue @@ -771,7 +771,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag t.Errorf("Test %q failed: %v", test.name, err) } - evaluateTestResults(ctrl, reactor.VolumeReactor, test, t) + evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t) } for _, test := range tests { @@ -797,13 +797,14 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag // of volumes/claims with expected claims/volumes and report differences. // // Some limit of calls in enforced to prevent endless loops. 
-func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) { +func runMultisyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) { + logger := klog.FromContext(ctx) run := func(t *testing.T, test controllerTest) { - klog.V(4).Infof("starting multisync test %q", test.name) + logger.V(4).Info("Starting multisync test", "testName", test.name) // Initialize the controller client := &fake.Clientset{} - ctrl, err := newTestController(client, nil, true) + ctrl, err := newTestController(ctx, client, nil, true) if err != nil { t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err) } @@ -815,7 +816,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s } ctrl.classLister = storagelisters.NewStorageClassLister(indexer) - reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors) + reactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors) for _, claim := range test.initialClaims { ctrl.claims.Add(claim) } @@ -837,7 +838,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s counter := 0 for { counter++ - klog.V(4).Infof("test %q: iteration %d", test.name, counter) + logger.V(4).Info("Test", "testName", test.name, "iteration", counter) if counter > 100 { t.Errorf("Test %q failed: too many iterations", test.name) @@ -847,7 +848,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s // Wait for all goroutines to finish reactor.waitForIdle() - obj := reactor.PopChange() + obj := reactor.PopChange(ctx) if obj == nil { // Nothing was changed, should we exit? if firstSync || reactor.GetChangeCount() > 0 { @@ -855,7 +856,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s // Simulate "periodic sync" of everything (until it produces // no changes). 
firstSync = false - klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name) + logger.V(4).Info("Test simulating periodical sync of all claims and volumes", "testName", test.name) reactor.SyncAll() } else { // Last sync did not produce any updates, the test reached @@ -876,7 +877,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s if err != nil { if err == pvtesting.ErrVersionConflict { // Ignore version errors - klog.V(4).Infof("test intentionally ignores version error.") + logger.V(4).Info("Test intentionally ignores version error") } else { t.Errorf("Error calling syncClaim: %v", err) // Finish the loop on the first error @@ -893,7 +894,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s if err != nil { if err == pvtesting.ErrVersionConflict { // Ignore version errors - klog.V(4).Infof("test intentionally ignores version error.") + logger.V(4).Info("Test intentionally ignores version error") } else { t.Errorf("Error calling syncVolume: %v", err) // Finish the loop on the first error @@ -904,8 +905,8 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s continue } } - evaluateTestResults(ctrl, reactor.VolumeReactor, test, t) - klog.V(4).Infof("test %q finished after %d iterations", test.name, counter) + evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t) + logger.V(4).Info("Test finished after iterations", "testName", test.name, "iterations", counter) } for _, test := range tests { @@ -985,10 +986,10 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol // Provisioner interfaces -func (plugin *mockVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *mockVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { if len(plugin.provisionCalls) > 0 { // mockVolumePlugin directly implements 
Provisioner interface - klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner") + logger.V(4).Info("Mock plugin NewProvisioner called, returning mock provisioner") plugin.provisionOptions = options return plugin, nil } else { @@ -1000,11 +1001,10 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi if len(plugin.provisionCalls) <= plugin.provisionCallCounter { return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter) } - var pv *v1.PersistentVolume call := plugin.provisionCalls[plugin.provisionCallCounter] if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) { - klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters) + klog.TODO().Error(nil, "Invalid provisioner call", "gotOptions", plugin.provisionOptions.Parameters, "expectedOptions", call.expectedParameters) return nil, fmt.Errorf("Mock plugin error: invalid provisioner call") } if call.ret == nil { @@ -1033,16 +1033,16 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi } plugin.provisionCallCounter++ - klog.V(4).Infof("mock plugin Provision call nr. 
%d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret) + klog.TODO().V(4).Info("Mock plugin Provision call nr", "provisionCallCounter", plugin.provisionCallCounter, "pv", klog.KObj(pv), "err", call.ret) return pv, call.ret } // Deleter interfaces -func (plugin *mockVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *mockVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { if len(plugin.deleteCalls) > 0 { // mockVolumePlugin directly implements Deleter interface - klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter") + logger.V(4).Info("Mock plugin NewDeleter called, returning mock deleter") return plugin, nil } else { return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured") @@ -1055,7 +1055,7 @@ func (plugin *mockVolumePlugin) Delete() error { } ret := plugin.deleteCalls[plugin.deleteCallCounter] plugin.deleteCallCounter++ - klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret) + klog.TODO().V(4).Info("Mock plugin Delete call nr", "deleteCallCounter", plugin.deleteCallCounter, "err", ret) return ret } @@ -1081,6 +1081,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventR } ret := plugin.recycleCalls[plugin.recycleCallCounter] plugin.recycleCallCounter++ - klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret) + klog.TODO().V(4).Info("Mock plugin Recycle call nr", "recycleCallCounter", plugin.recycleCallCounter, "err", ret) return ret } diff --git a/pkg/controller/volume/persistentvolume/provision_test.go b/pkg/controller/volume/persistentvolume/provision_test.go index a3a0b357877..c59b45a5331 100644 --- a/pkg/controller/volume/persistentvolume/provision_test.go +++ b/pkg/controller/volume/persistentvolume/provision_test.go @@ -17,11 +17,11 @@ limitations under the License. 
package persistentvolume import ( - "context" "errors" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/features" "testing" @@ -173,6 +173,7 @@ var provision2Success = provisionCall{ func TestProvisionSync(t *testing.T) { // Default enable the HonorPVReclaimPolicy feature gate. defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)() + _, ctx := ktesting.NewTestContext(t) tests := []controllerTest{ { // Provision a volume (with a default class) @@ -243,7 +244,7 @@ func TestProvisionSync(t *testing.T) { expectedClaims: newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, &classGold, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner), expectedEvents: noevents, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Create a volume before provisionClaimOperation starts. // This similates a parallel controller provisioning the volume. 
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, volume.AnnBoundByController, volume.AnnDynamicallyProvisioned) @@ -528,7 +529,7 @@ func TestProvisionSync(t *testing.T) { newClaimArray("claim11-23", "uid11-23", "1Gi", "", v1.ClaimPending, &classCopper, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner)), []string{"Normal ProvisioningSucceeded"}, noerrors, - wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), + wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { nodesIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}} @@ -578,7 +579,7 @@ func TestProvisionSync(t *testing.T) { wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), }, } - runSyncTests(t, tests, storageClasses, []*v1.Pod{}) + runSyncTests(t, ctx, tests, storageClasses, []*v1.Pod{}) } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -597,6 +598,7 @@ func TestProvisionSync(t *testing.T) { // // Some limit of calls in enforced to prevent endless loops. 
func TestProvisionMultiSync(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) tests := []controllerTest{ { // Provision a volume with binding @@ -620,7 +622,7 @@ func TestProvisionMultiSync(t *testing.T) { newClaimArray("claim12-2", "uid12-2", "1Gi", "pvc-uid12-2", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))), expectedEvents: []string{"Normal ExternalProvisioning"}, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Create a volume before syncClaim tries to bind a PV to PVC // This simulates external provisioner creating a volume while the controller // is waiting for a volume to bind to the existed claim @@ -659,7 +661,7 @@ func TestProvisionMultiSync(t *testing.T) { newClaimArray("claim12-4", "uid12-4", "1Gi", "pvc-uid12-4", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))), expectedEvents: []string{"Normal ExternalProvisioning"}, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Create a volume before syncClaim tries to bind a PV to PVC // This simulates external provisioner creating a volume while the controller // is waiting for a volume to bind to the existed claim @@ -676,16 +678,17 @@ func TestProvisionMultiSync(t *testing.T) { }, } - runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name) + runMultisyncTests(t, ctx, tests, 
storageClasses, storageClasses[0].Name) } // When provisioning is disabled, provisioning a claim should instantly return nil func TestDisablingDynamicProvisioner(t *testing.T) { - ctrl, err := newTestController(nil, nil, false) + _, ctx := ktesting.NewTestContext(t) + ctrl, err := newTestController(ctx, nil, nil, false) if err != nil { t.Fatalf("Construct PersistentVolume controller failed: %v", err) } - retVal := ctrl.provisionClaim(context.TODO(), nil) + retVal := ctrl.provisionClaim(ctx, nil) if retVal != nil { t.Errorf("Expected nil return but got %v", retVal) } diff --git a/pkg/controller/volume/persistentvolume/pv_controller.go b/pkg/controller/volume/persistentvolume/pv_controller.go index c3514af2de7..a58be28164e 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller.go +++ b/pkg/controller/volume/persistentvolume/pv_controller.go @@ -252,7 +252,8 @@ type PersistentVolumeController struct { // For easier readability, it was split into syncUnboundClaim and syncBoundClaim // methods. 
func (ctrl *PersistentVolumeController) syncClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) error { - klog.V(4).Infof("synchronizing PersistentVolumeClaim[%s]: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + logger := klog.FromContext(ctx) + logger.V(4).Info("Synchronizing PersistentVolumeClaim", "PVC", klog.KObj(claim), "claimStatus", getClaimStatusForLogging(claim)) // Set correct "migrated-to" annotations on PVC and update in API server if // necessary @@ -267,7 +268,7 @@ func (ctrl *PersistentVolumeController) syncClaim(ctx context.Context, claim *v1 if !metav1.HasAnnotation(claim.ObjectMeta, storagehelpers.AnnBindCompleted) { return ctrl.syncUnboundClaim(ctx, claim) } else { - return ctrl.syncBoundClaim(claim) + return ctrl.syncBoundClaim(ctx, claim) } } @@ -333,6 +334,7 @@ func (ctrl *PersistentVolumeController) emitEventForUnboundDelayBindingClaim(cla func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) error { // This is a new PVC that has not completed binding // OBSERVATION: pvc is "Pending" + logger := klog.FromContext(ctx) if claim.Spec.VolumeName == "" { // User did not care which PV they get. 
delayBinding, err := storagehelpers.IsDelayBindingMode(claim, ctrl.classLister) @@ -343,23 +345,23 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl // [Unit test set 1] volume, err := ctrl.volumes.findBestMatchForClaim(claim, delayBinding) if err != nil { - klog.V(2).Infof("synchronizing unbound PersistentVolumeClaim[%s]: Error finding PV for claim: %v", claimToClaimKey(claim), err) + logger.V(2).Info("Synchronizing unbound PersistentVolumeClaim, Error finding PV for claim", "PVC", klog.KObj(claim), "err", err) return fmt.Errorf("error finding PV for claim %q: %w", claimToClaimKey(claim), err) } if volume == nil { - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: no volume found", claimToClaimKey(claim)) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, no volume found", "PVC", klog.KObj(claim)) // No PV could be found // OBSERVATION: pvc is "Pending", will retry if utilfeature.DefaultFeatureGate.Enabled(features.RetroactiveDefaultStorageClass) { - klog.V(4).Infof("FeatureGate[%s] is enabled, attempting to assign storage class to unbound PersistentVolumeClaim[%s]", features.RetroactiveDefaultStorageClass, claimToClaimKey(claim)) - updated, err := ctrl.assignDefaultStorageClass(claim) + logger.V(4).Info("FeatureGate is enabled, attempting to assign storage class to unbound PersistentVolumeClaim", "featureGate", features.RetroactiveDefaultStorageClass, "PVC", klog.KObj(claim)) + updated, err := ctrl.assignDefaultStorageClass(ctx, claim) if err != nil { metrics.RecordRetroactiveStorageClassMetric(false) return fmt.Errorf("can't update PersistentVolumeClaim[%q]: %w", claimToClaimKey(claim), err) } if updated { - klog.V(4).Infof("PersistentVolumeClaim[%q] update successful, restarting claim sync", claimToClaimKey(claim)) + logger.V(4).Info("PersistentVolumeClaim update successful, restarting claim sync", "PVC", klog.KObj(claim)) metrics.RecordRetroactiveStorageClassMetric(true) return nil } @@ -381,7 
+383,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl // Mark the claim as Pending and try to find a match in the next // periodic syncClaim - if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { + if _, err = ctrl.updateClaimStatus(ctx, claim, v1.ClaimPending, nil); err != nil { return err } return nil @@ -389,8 +391,8 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl // Found a PV for this claim // OBSERVATION: pvc is "Pending", pv is "Available" claimKey := claimToClaimKey(claim) - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q found: %s", claimKey, volume.Name, getVolumeStatusForLogging(volume)) - if err = ctrl.bind(volume, claim); err != nil { + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume found", "PVC", klog.KObj(claim), "volumeName", volume.Name, "volumeStatus", getVolumeStatusForLogging(volume)) + if err = ctrl.bind(ctx, volume, claim); err != nil { // On any error saving the volume or the claim, subsequent // syncClaim will finish the binding. // record count error for provision if exists @@ -408,7 +410,7 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl } else /* pvc.Spec.VolumeName != nil */ { // [Unit test set 2] // User asked for a specific PV. - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested", claimToClaimKey(claim), claim.Spec.VolumeName) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume requested", "PVC", klog.KObj(claim), "volumeName", claim.Spec.VolumeName) obj, found, err := ctrl.volumes.store.GetByKey(claim.Spec.VolumeName) if err != nil { return err @@ -417,8 +419,8 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl // User asked for a PV that does not exist. // OBSERVATION: pvc is "Pending" // Retry later. 
- klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and not found, will try again next time", claimToClaimKey(claim), claim.Spec.VolumeName) - if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume requested and not found, will try again next time", "PVC", klog.KObj(claim), "volumeName", claim.Spec.VolumeName) + if _, err = ctrl.updateClaimStatus(ctx, claim, v1.ClaimPending, nil); err != nil { return err } return nil @@ -427,21 +429,21 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl if !ok { return fmt.Errorf("cannot convert object from volume cache to volume %q!?: %+v", claim.Spec.VolumeName, obj) } - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume %q requested and found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume requested and found", "PVC", klog.KObj(claim), "volumeName", claim.Spec.VolumeName, "volumeStatus", getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // User asked for a PV that is not claimed // OBSERVATION: pvc is "Pending", pv is "Available" - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume is unbound, binding", claimToClaimKey(claim)) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume is unbound, binding", "PVC", klog.KObj(claim)) if err = checkVolumeSatisfyClaim(volume, claim); err != nil { - klog.V(4).Infof("Can't bind the claim to volume %q: %v", volume.Name, err) + logger.V(4).Info("Can't bind the claim to volume", "volumeName", volume.Name, "err", err) // send an event msg := fmt.Sprintf("Cannot bind to requested volume %q: %s", volume.Name, err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.VolumeMismatch, msg) // volume does not satisfy the requirements of the 
claim - if _, err = ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { + if _, err = ctrl.updateClaimStatus(ctx, claim, v1.ClaimPending, nil); err != nil { return err } - } else if err = ctrl.bind(volume, claim); err != nil { + } else if err = ctrl.bind(ctx, volume, claim); err != nil { // On any error saving the volume or the claim, subsequent // syncClaim will finish the binding. return err @@ -451,10 +453,10 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl } else if storagehelpers.IsVolumeBoundToClaim(volume, claim) { // User asked for a PV that is claimed by this PVC // OBSERVATION: pvc is "Pending", pv is "Bound" - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound, finishing the binding", claimToClaimKey(claim)) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume already bound, finishing the binding", "PVC", klog.KObj(claim)) // Finish the volume binding by adding claim UID. - if err = ctrl.bind(volume, claim); err != nil { + if err = ctrl.bind(ctx, volume, claim); err != nil { return err } // OBSERVATION: pvc is "Bound", pv is "Bound" @@ -463,18 +465,18 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl // User asked for a PV that is claimed by someone else // OBSERVATION: pvc is "Pending", pv is "Bound" if !metav1.HasAnnotation(claim.ObjectMeta, storagehelpers.AnnBoundByController) { - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim by user, will retry later", claimToClaimKey(claim)) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume already bound to different claim by user, will retry later", "PVC", klog.KObj(claim)) claimMsg := fmt.Sprintf("volume %q already bound to a different claim.", volume.Name) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.FailedBinding, claimMsg) // User asked for a specific PV, retry later - if _, err = 
ctrl.updateClaimStatus(claim, v1.ClaimPending, nil); err != nil { + if _, err = ctrl.updateClaimStatus(ctx, claim, v1.ClaimPending, nil); err != nil { return err } return nil } else { // This should never happen because someone had to remove // AnnBindCompleted annotation on the claim. - klog.V(4).Infof("synchronizing unbound PersistentVolumeClaim[%s]: volume already bound to different claim %q by controller, THIS SHOULD NEVER HAPPEN", claimToClaimKey(claim), claimrefToClaimKey(volume.Spec.ClaimRef)) + logger.V(4).Info("Synchronizing unbound PersistentVolumeClaim, volume already bound to different claim by controller, THIS SHOULD NEVER HAPPEN", "PVC", klog.KObj(claim), "boundClaim", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)) claimMsg := fmt.Sprintf("volume %q already bound to a different claim.", volume.Name) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.FailedBinding, claimMsg) @@ -487,14 +489,17 @@ func (ctrl *PersistentVolumeController) syncUnboundClaim(ctx context.Context, cl // syncBoundClaim is the main controller method to decide what to do with a // bound claim. -func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolumeClaim) error { +func (ctrl *PersistentVolumeController) syncBoundClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) error { // HasAnnotation(pvc, storagehelpers.AnnBindCompleted) // This PVC has previously been bound // OBSERVATION: pvc is not "Pending" // [Unit test set 3] + + logger := klog.FromContext(ctx) + if claim.Spec.VolumeName == "" { // Claim was bound before but not any more. - if _, err := ctrl.updateClaimStatusWithEvent(claim, v1.ClaimLost, nil, v1.EventTypeWarning, "ClaimLost", "Bound claim has lost reference to PersistentVolume. Data on the volume is lost!"); err != nil { + if _, err := ctrl.updateClaimStatusWithEvent(ctx, claim, v1.ClaimLost, nil, v1.EventTypeWarning, "ClaimLost", "Bound claim has lost reference to PersistentVolume. 
Data on the volume is lost!"); err != nil { return err } return nil @@ -505,7 +510,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum } if !found { // Claim is bound to a non-existing volume. - if _, err = ctrl.updateClaimStatusWithEvent(claim, v1.ClaimLost, nil, v1.EventTypeWarning, "ClaimLost", "Bound claim has lost its PersistentVolume. Data on the volume is lost!"); err != nil { + if _, err = ctrl.updateClaimStatusWithEvent(ctx, claim, v1.ClaimLost, nil, v1.EventTypeWarning, "ClaimLost", "Bound claim has lost its PersistentVolume. Data on the volume is lost!"); err != nil { return err } return nil @@ -515,14 +520,14 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum return fmt.Errorf("cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } - klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume %q found: %s", claimToClaimKey(claim), claim.Spec.VolumeName, getVolumeStatusForLogging(volume)) + logger.V(4).Info("Synchronizing bound PersistentVolumeClaim, volume found", "PVC", klog.KObj(claim), "volumeName", claim.Spec.VolumeName, "volumeStatus", getVolumeStatusForLogging(volume)) if volume.Spec.ClaimRef == nil { // Claim is bound but volume has come unbound. // Or, a claim was bound and the controller has not received updated // volume yet. We can't distinguish these cases. // Bind the volume again and set all states to Bound. 
- klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: volume is unbound, fixing", claimToClaimKey(claim)) - if err = ctrl.bind(volume, claim); err != nil { + logger.V(4).Info("Synchronizing bound PersistentVolumeClaim, volume is unbound, fixing", "PVC", klog.KObj(claim)) + if err = ctrl.bind(ctx, volume, claim); err != nil { // Objects not saved, next syncPV or syncClaim will try again return err } @@ -532,8 +537,8 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // NOTE: syncPV can handle this so it can be left out. // NOTE: bind() call here will do nothing in most cases as // everything should be already set. - klog.V(4).Infof("synchronizing bound PersistentVolumeClaim[%s]: claim is already correctly bound", claimToClaimKey(claim)) - if err = ctrl.bind(volume, claim); err != nil { + logger.V(4).Info("Synchronizing bound PersistentVolumeClaim, claim is already correctly bound", "PVC", klog.KObj(claim)) + if err = ctrl.bind(ctx, volume, claim); err != nil { // Objects not saved, next syncPV or syncClaim will try again return err } @@ -542,7 +547,7 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // Claim is bound but volume has a different claimant. // Set the claim phase to 'Lost', which is a terminal // phase. - if _, err = ctrl.updateClaimStatusWithEvent(claim, v1.ClaimLost, nil, v1.EventTypeWarning, "ClaimMisbound", "Two claims are bound to the same volume, this one is bound incorrectly"); err != nil { + if _, err = ctrl.updateClaimStatusWithEvent(ctx, claim, v1.ClaimLost, nil, v1.EventTypeWarning, "ClaimMisbound", "Two claims are bound to the same volume, this one is bound incorrectly"); err != nil { return err } return nil @@ -555,7 +560,8 @@ func (ctrl *PersistentVolumeController) syncBoundClaim(claim *v1.PersistentVolum // created, updated or periodically synced. We do not differentiate between // these events. 
func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume *v1.PersistentVolume) error { - klog.V(4).Infof("synchronizing PersistentVolume[%s]: %s", volume.Name, getVolumeStatusForLogging(volume)) + logger := klog.FromContext(ctx) + logger.V(4).Info("Synchronizing PersistentVolume", "volumeName", volume.Name, "volumeStatus", getVolumeStatusForLogging(volume)) // Set correct "migrated-to" annotations and modify finalizers on PV and update in API server if // necessary newVolume, err := ctrl.updateVolumeMigrationAnnotationsAndFinalizers(ctx, volume) @@ -569,8 +575,8 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // [Unit test set 4] if volume.Spec.ClaimRef == nil { // Volume is unused - klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is unused", volume.Name) - if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil { + logger.V(4).Info("Synchronizing PersistentVolume, volume is unused", "volumeName", volume.Name) + if _, err := ctrl.updateVolumePhase(ctx, volume, v1.VolumeAvailable, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method return err @@ -581,15 +587,15 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * if volume.Spec.ClaimRef.UID == "" { // The PV is reserved for a PVC; that PVC has not yet been // bound to this PV; the PVC sync will handle it. 
- klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is pre-bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) - if _, err := ctrl.updateVolumePhase(volume, v1.VolumeAvailable, ""); err != nil { + logger.V(4).Info("Synchronizing PersistentVolume, volume is pre-bound to claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name) + if _, err := ctrl.updateVolumePhase(ctx, volume, v1.VolumeAvailable, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method return err } return nil } - klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound to claim %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + logger.V(4).Info("Synchronizing PersistentVolume, volume is bound to claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name) // Get the PVC by _name_ var claim *v1.PersistentVolumeClaim claimName := claimrefToClaimKey(volume.Spec.ClaimRef) @@ -614,7 +620,7 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * } found = !apierrors.IsNotFound(err) if !found { - obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(context.TODO(), volume.Spec.ClaimRef.Name, metav1.GetOptions{}) + obj, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(ctx, volume.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } @@ -623,7 +629,7 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * } } if !found { - klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + logger.V(4).Info("Synchronizing PersistentVolume, claim not found", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", 
volume.Name) // Fall through with claim = nil } else { var ok bool @@ -631,25 +637,25 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * if !ok { return fmt.Errorf("cannot convert object from volume cache to volume %q!?: %#v", claim.Spec.VolumeName, obj) } - klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s found: %s", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), getClaimStatusForLogging(claim)) + logger.V(4).Info("Synchronizing PersistentVolume, claim found", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "claimStatus", getClaimStatusForLogging(claim), "volumeName", volume.Name) } if claim != nil && claim.UID != volume.Spec.ClaimRef.UID { // The claim that the PV was pointing to was deleted, and another // with the same name created. // in some cases, the cached claim is not the newest, and the volume.Spec.ClaimRef.UID is newer than cached. // so we should double check by calling apiserver and get the newest claim, then compare them. - klog.V(4).Infof("Maybe cached claim: %s is not the newest one, we should fetch it from apiserver", claimrefToClaimKey(volume.Spec.ClaimRef)) + logger.V(4).Info("Maybe cached claim is not the newest one, we should fetch it from apiserver", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)) - claim, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(context.TODO(), volume.Spec.ClaimRef.Name, metav1.GetOptions{}) + claim, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(volume.Spec.ClaimRef.Namespace).Get(ctx, volume.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { return err } else if claim != nil { // Treat the volume as bound to a missing claim. 
if claim.UID != volume.Spec.ClaimRef.UID { - klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has a newer UID than pv.ClaimRef, the old one must have been deleted", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + logger.V(4).Info("Synchronizing PersistentVolume, claim has a newer UID than pv.ClaimRef, the old one must have been deleted", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name) claim = nil } else { - klog.V(4).Infof("synchronizing PersistentVolume[%s]: claim %s has a same UID with pv.ClaimRef", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) + logger.V(4).Info("Synchronizing PersistentVolume, claim has a same UID with pv.ClaimRef", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name) } } } @@ -664,21 +670,21 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // volume. if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { // Also, log this only once: - klog.V(2).Infof("volume %q is released and reclaim policy %q will be executed", volume.Name, volume.Spec.PersistentVolumeReclaimPolicy) - if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil { + logger.V(2).Info("Volume is released and reclaim policy will be executed", "volumeName", volume.Name, "reclaimPolicy", volume.Spec.PersistentVolumeReclaimPolicy) + if volume, err = ctrl.updateVolumePhase(ctx, volume, v1.VolumeReleased, ""); err != nil { // Nothing was saved; we will fall back into the same condition // in the next call to this method return err } } - if err = ctrl.reclaimVolume(volume); err != nil { + if err = ctrl.reclaimVolume(ctx, volume); err != nil { // Release failed, we will fall back into the same condition // in the next call to this method return err } if volume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimRetain { // volume is being retained, 
it references a claim that does not exist now. - klog.V(4).Infof("PersistentVolume[%s] references a claim %q (%s) that is not found", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef), volume.Spec.ClaimRef.UID) + logger.V(4).Info("PersistentVolume references a claim that is not found", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "claimUID", volume.Spec.ClaimRef.UID, "volumeName", volume.Name) } return nil } else if claim.Spec.VolumeName == "" { @@ -695,10 +701,10 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * if metav1.HasAnnotation(volume.ObjectMeta, storagehelpers.AnnBoundByController) { // The binding is not completed; let PVC sync handle it - klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume not bound yet, waiting for syncClaim to fix it", volume.Name) + logger.V(4).Info("Synchronizing PersistentVolume, volume not bound yet, waiting for syncClaim to fix it", "volumeName", volume.Name) } else { // Dangling PV; try to re-establish the link in the PVC sync - klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume was bound and got unbound (by user?), waiting for syncClaim to fix it", volume.Name) + logger.V(4).Info("Synchronizing PersistentVolume, volume was bound and got unbound (by user?), waiting for syncClaim to fix it", "volumeName", volume.Name) } // In both cases, the volume is Bound and the claim is Pending. // Next syncClaim will fix it. 
To speed it up, we enqueue the claim @@ -711,8 +717,8 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * return nil } else if claim.Spec.VolumeName == volume.Name { // Volume is bound to a claim properly, update status if necessary - klog.V(4).Infof("synchronizing PersistentVolume[%s]: all is bound", volume.Name) - if _, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil { + logger.V(4).Info("Synchronizing PersistentVolume, all is bound", "volumeName", volume.Name) + if _, err = ctrl.updateVolumePhase(ctx, volume, v1.VolumeBound, ""); err != nil { // Nothing was saved; we will fall back into the same // condition in the next call to this method return err @@ -728,14 +734,14 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // the user know. Don't overwrite existing Failed status! if volume.Status.Phase != v1.VolumeReleased && volume.Status.Phase != v1.VolumeFailed { // Also, log this only once: - klog.V(2).Infof("dynamically provisioned volume %q is released and it will be deleted", volume.Name) - if volume, err = ctrl.updateVolumePhase(volume, v1.VolumeReleased, ""); err != nil { + logger.V(2).Info("Dynamically provisioned volume is released and it will be deleted", "volumeName", volume.Name) + if volume, err = ctrl.updateVolumePhase(ctx, volume, v1.VolumeReleased, ""); err != nil { // Nothing was saved; we will fall back into the same condition // in the next call to this method return err } } - if err = ctrl.reclaimVolume(volume); err != nil { + if err = ctrl.reclaimVolume(ctx, volume); err != nil { // Deletion failed, we will fall back into the same condition // in the next call to this method return err @@ -748,18 +754,18 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // This is part of the normal operation of the controller; the // controller tried to use this volume for a claim but the claim // was fulfilled by another volume. 
We did this; fix it. - klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by controller to a claim that is bound to another volume, unbinding", volume.Name) - if err = ctrl.unbindVolume(volume); err != nil { + logger.V(4).Info("Synchronizing PersistentVolume, volume is bound by controller to a claim that is bound to another volume, unbinding", "volumeName", volume.Name) + if err = ctrl.unbindVolume(ctx, volume); err != nil { return err } return nil } else { // The PV must have been created with this ptr; leave it alone. - klog.V(4).Infof("synchronizing PersistentVolume[%s]: volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", volume.Name) + logger.V(4).Info("Synchronizing PersistentVolume, volume is bound by user to a claim that is bound to another volume, waiting for the claim to get unbound", "volumeName", volume.Name) // This just updates the volume phase and clears // volume.Spec.ClaimRef.UID. It leaves the volume pre-bound // to the claim. 
- if err = ctrl.unbindVolume(volume); err != nil { + if err = ctrl.unbindVolume(ctx, volume); err != nil { return err } return nil @@ -775,8 +781,9 @@ func (ctrl *PersistentVolumeController) syncVolume(ctx context.Context, volume * // claim - claim to update // phase - phase to set // volume - volume which Capacity is set into claim.Status.Capacity -func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { - klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s", claimToClaimKey(claim), phase) +func (ctrl *PersistentVolumeController) updateClaimStatus(ctx context.Context, claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating PersistentVolumeClaim status", "PVC", klog.KObj(claim), "setPhase", phase) dirty := false @@ -814,10 +821,10 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo claimCap, ok := claim.Status.Capacity[v1.ResourceStorage] // If PV has a resize annotation, set the claim's request capacity if metav1.HasAnnotation(volume.ObjectMeta, util.AnnPreResizeCapacity) { - klog.V(2).Infof("volume %q requires filesystem resize: setting pvc %s status capacity to %s", volume.Name, claimToClaimKey(claim), volume.ObjectMeta.Annotations[util.AnnPreResizeCapacity]) + logger.V(2).Info("Volume requires filesystem resize: setting pvc status capacity", "PVC", klog.KObj(claim), "volumeName", volume.Name, "statusCapacity", volume.ObjectMeta.Annotations[util.AnnPreResizeCapacity]) preQty, err := resource.ParseQuantity(volume.ObjectMeta.Annotations[util.AnnPreResizeCapacity]) if err != nil { - klog.Warningf("Parsing pre-resize-capacity from PV(%q) failed", volume.Name, err) + logger.Info("Parsing pre-resize-capacity from PV failed", "volumeName", 
volume.Name, "err", err) preQty = volume.Spec.Capacity[v1.ResourceStorage] } if claimClone.Status.Capacity == nil { @@ -834,21 +841,21 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo if !dirty { // Nothing to do. - klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: phase %s already set", claimToClaimKey(claim), phase) + logger.V(4).Info("Updating PersistentVolumeClaim status, phase already set", "PVC", klog.KObj(claim), "phase", phase) return claim, nil } - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(context.TODO(), claimClone, metav1.UpdateOptions{}) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claimClone.Namespace).UpdateStatus(ctx, claimClone, metav1.UpdateOptions{}) if err != nil { - klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: set phase %s failed: %v", claimToClaimKey(claim), phase, err) + logger.V(4).Info("Updating PersistentVolumeClaim status, set phase failed", "PVC", klog.KObj(claim), "phase", phase, "err", err) return newClaim, err } - _, err = ctrl.storeClaimUpdate(newClaim) + _, err = ctrl.storeClaimUpdate(logger, newClaim) if err != nil { - klog.V(4).Infof("updating PersistentVolumeClaim[%s] status: cannot update internal cache: %v", claimToClaimKey(claim), err) + logger.V(4).Info("Updating PersistentVolumeClaim status: cannot update internal cache", "PVC", klog.KObj(claim), "err", err) return newClaim, err } - klog.V(2).Infof("claim %q entered phase %q", claimToClaimKey(claim), phase) + logger.V(2).Info("Claim entered phase", "PVC", klog.KObj(claim), "phase", phase) return newClaim, nil } @@ -861,33 +868,35 @@ func (ctrl *PersistentVolumeController) updateClaimStatus(claim *v1.PersistentVo // phase - phase to set // volume - volume which Capacity is set into claim.Status.Capacity // eventtype, reason, message - event to send, see EventRecorder.Event() -func (ctrl *PersistentVolumeController) 
updateClaimStatusWithEvent(claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { - klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: set phase %s", claimToClaimKey(claim), phase) +func (ctrl *PersistentVolumeController) updateClaimStatusWithEvent(ctx context.Context, claim *v1.PersistentVolumeClaim, phase v1.PersistentVolumeClaimPhase, volume *v1.PersistentVolume, eventtype, reason, message string) (*v1.PersistentVolumeClaim, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating updateClaimStatusWithEvent", "PVC", klog.KObj(claim), "setPhase", phase) if claim.Status.Phase == phase { // Nothing to do. - klog.V(4).Infof("updating updateClaimStatusWithEvent[%s]: phase %s already set", claimToClaimKey(claim), phase) + logger.V(4).Info("Updating updateClaimStatusWithEvent, phase already set", "PVC", klog.KObj(claim), "phase", phase) return claim, nil } - newClaim, err := ctrl.updateClaimStatus(claim, phase, volume) + newClaim, err := ctrl.updateClaimStatus(ctx, claim, phase, volume) if err != nil { return nil, err } // Emit the event only when the status change happens, not every time // syncClaim is called. - klog.V(3).Infof("claim %q changed status to %q: %s", claimToClaimKey(claim), phase, message) + logger.V(3).Info("Claim changed status", "PVC", klog.KObj(claim), "phase", phase, "message", message) ctrl.eventRecorder.Event(newClaim, eventtype, reason, message) return newClaim, nil } // updateVolumePhase saves new volume phase to API server. 
-func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, message string) (*v1.PersistentVolume, error) { - klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s", volume.Name, phase) +func (ctrl *PersistentVolumeController) updateVolumePhase(ctx context.Context, volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, message string) (*v1.PersistentVolume, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating PersistentVolume", "volumeName", volume.Name, "setPhase", phase) if volume.Status.Phase == phase { // Nothing to do. - klog.V(4).Infof("updating PersistentVolume[%s]: phase %s already set", volume.Name, phase) + logger.V(4).Info("Updating PersistentVolume: phase already set", "volumeName", volume.Name, "phase", phase) return volume, nil } @@ -895,39 +904,40 @@ func (ctrl *PersistentVolumeController) updateVolumePhase(volume *v1.PersistentV volumeClone.Status.Phase = phase volumeClone.Status.Message = message - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(context.TODO(), volumeClone, metav1.UpdateOptions{}) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().UpdateStatus(ctx, volumeClone, metav1.UpdateOptions{}) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: set phase %s failed: %v", volume.Name, phase, err) + logger.V(4).Info("Updating PersistentVolume: set phase failed", "volumeName", volume.Name, "phase", phase, "err", err) return newVol, err } - _, err = ctrl.storeVolumeUpdate(newVol) + _, err = ctrl.storeVolumeUpdate(logger, newVol) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) + logger.V(4).Info("Updating PersistentVolume: cannot update internal cache", "volumeName", volume.Name, "err", err) return newVol, err } - klog.V(2).Infof("volume %q entered phase %q", volume.Name, phase) + logger.V(2).Info("Volume entered phase", "volumeName", 
volume.Name, "phase", phase) return newVol, err } // updateVolumePhaseWithEvent saves new volume phase to API server and emits // given event on the volume. It saves the phase and emits the event only when // the phase has actually changed from the version saved in API server. -func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, eventtype, reason, message string) (*v1.PersistentVolume, error) { - klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: set phase %s", volume.Name, phase) +func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(ctx context.Context, volume *v1.PersistentVolume, phase v1.PersistentVolumePhase, eventtype, reason, message string) (*v1.PersistentVolume, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating updateVolumePhaseWithEvent", "volumeName", volume.Name, "setPhase", phase) if volume.Status.Phase == phase { // Nothing to do. - klog.V(4).Infof("updating updateVolumePhaseWithEvent[%s]: phase %s already set", volume.Name, phase) + logger.V(4).Info("Updating updateVolumePhaseWithEvent: phase already set", "volumeName", volume.Name, "phase", phase) return volume, nil } - newVol, err := ctrl.updateVolumePhase(volume, phase, message) + newVol, err := ctrl.updateVolumePhase(ctx, volume, phase, message) if err != nil { return nil, err } // Emit the event only when the status change happens, not every time // syncClaim is called. - klog.V(3).Infof("volume %q changed status to %q: %s", volume.Name, phase, message) + logger.V(3).Info("Volume changed status", "volumeName", volume.Name, "changedPhase", phase, "message", message) ctrl.eventRecorder.Event(newVol, eventtype, reason, message) return newVol, nil @@ -936,7 +946,9 @@ func (ctrl *PersistentVolumeController) updateVolumePhaseWithEvent(volume *v1.Pe // assignDefaultStorageClass updates the claim storage class if there is any, the claim is updated to the API server. 
// Ignores claims that already have a storage class. // TODO: if resync is ever changed to a larger period, we might need to change how we set the default class on existing unbound claims -func (ctrl *PersistentVolumeController) assignDefaultStorageClass(claim *v1.PersistentVolumeClaim) (bool, error) { +func (ctrl *PersistentVolumeController) assignDefaultStorageClass(ctx context.Context, claim *v1.PersistentVolumeClaim) (bool, error) { + logger := klog.FromContext(ctx) + if storagehelpers.GetPersistentVolumeClaimClass(claim) != "" { return false, nil } @@ -945,28 +957,29 @@ func (ctrl *PersistentVolumeController) assignDefaultStorageClass(claim *v1.Pers if err != nil { // It is safe to ignore errors here because it means we either could not list SCs or there is more than one default. // TODO: do not ignore errors after this PR is merged: https://github.com/kubernetes/kubernetes/pull/110559 - klog.V(4).Infof("failed to get default storage class: %v", err) + logger.V(4).Info("Failed to get default storage class", "err", err) return false, nil } else if class == nil { - klog.V(4).Infof("can not assign storage class to PersistentVolumeClaim[%s]: default storage class not found", claimToClaimKey(claim)) + logger.V(4).Info("Can not assign storage class to PersistentVolumeClaim: default storage class not found", "PVC", klog.KObj(claim)) return false, nil } - klog.V(4).Infof("assigning StorageClass[%s] to PersistentVolumeClaim[%s]", class.Name, claimToClaimKey(claim)) + logger.V(4).Info("Assigning StorageClass to PersistentVolumeClaim", "PVC", klog.KObj(claim), "storageClassName", class.Name) claim.Spec.StorageClassName = &class.Name - _, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.GetNamespace()).Update(context.TODO(), claim, metav1.UpdateOptions{}) + _, err = ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.GetNamespace()).Update(ctx, claim, metav1.UpdateOptions{}) if err != nil { return false, err } - klog.V(4).Infof("successfully assigned 
StorageClass[%s] to PersistentVolumeClaim[%s]", claimToClaimKey(claim), class.Name) + logger.V(4).Info("Successfully assigned StorageClass to PersistentVolumeClaim", "PVC", klog.KObj(claim), "storageClassName", class.Name) return true, nil } // bindVolumeToClaim modifies given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! -func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { - klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q", volume.Name, claimToClaimKey(claim)) +func (ctrl *PersistentVolumeController) bindVolumeToClaim(ctx context.Context, volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating PersistentVolume: binding to claim", "PVC", klog.KObj(claim), "volumeName", volume.Name) volumeClone, dirty, err := storagehelpers.GetBindVolumeToClaim(volume, claim) if err != nil { @@ -975,38 +988,39 @@ func (ctrl *PersistentVolumeController) bindVolumeToClaim(volume *v1.PersistentV // Save the volume only if something was changed if dirty { - return ctrl.updateBindVolumeToClaim(volumeClone, true) + return ctrl.updateBindVolumeToClaim(ctx, volumeClone, true) } - klog.V(4).Infof("updating PersistentVolume[%s]: already bound to %q", volume.Name, claimToClaimKey(claim)) + logger.V(4).Info("Updating PersistentVolume: already bound to claim", "PVC", klog.KObj(claim), "volumeName", volume.Name) return volume, nil } // updateBindVolumeToClaim modifies given volume to be bound to a claim and saves it to // API server. The claim is not modified in this method! 
-func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) { - claimKey := claimrefToClaimKey(volumeClone.Spec.ClaimRef) - klog.V(2).Infof("claim %q bound to volume %q", claimKey, volumeClone.Name) - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{}) +func (ctrl *PersistentVolumeController) updateBindVolumeToClaim(ctx context.Context, volumeClone *v1.PersistentVolume, updateCache bool) (*v1.PersistentVolume, error) { + logger := klog.FromContext(ctx) + logger.V(2).Info("Claim bound to volume", "PVC", klog.KRef(volumeClone.Spec.ClaimRef.Namespace, volumeClone.Spec.ClaimRef.Name), "volumeName", volumeClone.Name) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(ctx, volumeClone, metav1.UpdateOptions{}) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: binding to %q failed: %v", volumeClone.Name, claimKey, err) + logger.V(4).Info("Updating PersistentVolume: binding to claim failed", "PVC", klog.KRef(volumeClone.Spec.ClaimRef.Namespace, volumeClone.Spec.ClaimRef.Name), "volumeName", volumeClone.Name, "err", err) return newVol, err } if updateCache { - _, err = ctrl.storeVolumeUpdate(newVol) + _, err = ctrl.storeVolumeUpdate(logger, newVol) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volumeClone.Name, err) + logger.V(4).Info("Updating PersistentVolume: cannot update internal cache", "volumeName", volumeClone.Name, "err", err) return newVol, err } } - klog.V(4).Infof("updating PersistentVolume[%s]: bound to %q", newVol.Name, claimKey) + logger.V(4).Info("Updating PersistentVolume: bound to claim", "PVC", klog.KRef(volumeClone.Spec.ClaimRef.Namespace, volumeClone.Spec.ClaimRef.Name), "volumeName", newVol.Name) return newVol, nil } // bindClaimToVolume modifies the given claim to be bound to a volume and // saves it to API 
server. The volume is not modified in this method! -func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { - klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q", claimToClaimKey(claim), volume.Name) +func (ctrl *PersistentVolumeController) bindClaimToVolume(ctx context.Context, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) (*v1.PersistentVolumeClaim, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating PersistentVolumeClaim: binding to volume", "PVC", klog.KObj(claim), "volumeName", volume.Name) dirty := false @@ -1038,22 +1052,22 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo } if dirty { - klog.V(2).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) + logger.V(2).Info("Volume bound to claim", "PVC", klog.KObj(claim), "volumeName", volume.Name) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) if err != nil { - klog.V(4).Infof("updating PersistentVolumeClaim[%s]: binding to %q failed: %v", claimToClaimKey(claim), volume.Name, err) + logger.V(4).Info("Updating PersistentVolumeClaim: binding to volume failed", "PVC", klog.KObj(claim), "volumeName", volume.Name, "err", err) return newClaim, err } - _, err = ctrl.storeClaimUpdate(newClaim) + _, err = ctrl.storeClaimUpdate(logger, newClaim) if err != nil { - klog.V(4).Infof("updating PersistentVolumeClaim[%s]: cannot update internal cache: %v", claimToClaimKey(claim), err) + logger.V(4).Info("Updating PersistentVolumeClaim: cannot update internal cache", "PVC", klog.KObj(claim), "err", err) return newClaim, err } - klog.V(4).Infof("updating PersistentVolumeClaim[%s]: bound to %q", 
claimToClaimKey(claim), volume.Name) + logger.V(4).Info("Updating PersistentVolumeClaim: bound to volume", "PVC", klog.KObj(claim), "volumeName", volume.Name) return newClaim, nil } - klog.V(4).Infof("updating PersistentVolumeClaim[%s]: already bound to %q", claimToClaimKey(claim), volume.Name) + logger.V(4).Info("Updating PersistentVolumeClaim: already bound to volume", "PVC", klog.KObj(claim), "volumeName", volume.Name) return claim, nil } @@ -1061,42 +1075,43 @@ func (ctrl *PersistentVolumeController) bindClaimToVolume(claim *v1.PersistentVo // both objects as Bound. Volume is saved first. // It returns on first error, it's up to the caller to implement some retry // mechanism. -func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) error { +func (ctrl *PersistentVolumeController) bind(ctx context.Context, volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) error { var err error // use updateClaim/updatedVolume to keep the original claim/volume for // logging in error cases. 
var updatedClaim *v1.PersistentVolumeClaim var updatedVolume *v1.PersistentVolume - klog.V(4).Infof("binding volume %q to claim %q", volume.Name, claimToClaimKey(claim)) + logger := klog.FromContext(ctx) + logger.V(4).Info("Binding volume to claim", "PVC", klog.KObj(claim), "volumeName", volume.Name) - if updatedVolume, err = ctrl.bindVolumeToClaim(volume, claim); err != nil { - klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume: %v", volume.Name, claimToClaimKey(claim), err) + if updatedVolume, err = ctrl.bindVolumeToClaim(ctx, volume, claim); err != nil { + logger.V(3).Info("Error binding volume to claim: failed saving the volume", "PVC", klog.KObj(claim), "volumeName", volume.Name, "err", err) return err } volume = updatedVolume - if updatedVolume, err = ctrl.updateVolumePhase(volume, v1.VolumeBound, ""); err != nil { - klog.V(3).Infof("error binding volume %q to claim %q: failed saving the volume status: %v", volume.Name, claimToClaimKey(claim), err) + if updatedVolume, err = ctrl.updateVolumePhase(ctx, volume, v1.VolumeBound, ""); err != nil { + logger.V(3).Info("Error binding volume to claim: failed saving the volume status", "PVC", klog.KObj(claim), "volumeName", volume.Name, "err", err) return err } volume = updatedVolume - if updatedClaim, err = ctrl.bindClaimToVolume(claim, volume); err != nil { - klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim: %v", volume.Name, claimToClaimKey(claim), err) + if updatedClaim, err = ctrl.bindClaimToVolume(ctx, claim, volume); err != nil { + logger.V(3).Info("Error binding volume to claim: failed saving the claim", "PVC", klog.KObj(claim), "volumeName", volume.Name, "err", err) return err } claim = updatedClaim - if updatedClaim, err = ctrl.updateClaimStatus(claim, v1.ClaimBound, volume); err != nil { - klog.V(3).Infof("error binding volume %q to claim %q: failed saving the claim status: %v", volume.Name, claimToClaimKey(claim), err) + if updatedClaim, err = 
ctrl.updateClaimStatus(ctx, claim, v1.ClaimBound, volume); err != nil { + logger.V(3).Info("Error binding volume to claim: failed saving the claim status", "PVC", klog.KObj(claim), "volumeName", volume.Name, "err", err) return err } claim = updatedClaim - klog.V(4).Infof("volume %q bound to claim %q", volume.Name, claimToClaimKey(claim)) - klog.V(4).Infof("volume %q status after binding: %s", volume.Name, getVolumeStatusForLogging(volume)) - klog.V(4).Infof("claim %q status after binding: %s", claimToClaimKey(claim), getClaimStatusForLogging(claim)) + logger.V(4).Info("Volume bound to claim", "PVC", klog.KObj(claim), "volumeName", volume.Name) + logger.V(4).Info("Volume status after binding", "volumeName", volume.Name, "volumeStatus", getVolumeStatusForLogging(volume)) + logger.V(4).Info("Claim status after binding", "PVC", klog.KObj(claim), "claimStatus", getClaimStatusForLogging(claim)) return nil } @@ -1106,8 +1121,9 @@ func (ctrl *PersistentVolumeController) bind(volume *v1.PersistentVolume, claim // This method updates both Spec and Status. // It returns on first error, it's up to the caller to implement some retry // mechanism. -func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume) error { - klog.V(4).Infof("updating PersistentVolume[%s]: rolling back binding from %q", volume.Name, claimrefToClaimKey(volume.Spec.ClaimRef)) +func (ctrl *PersistentVolumeController) unbindVolume(ctx context.Context, volume *v1.PersistentVolume) error { + logger := klog.FromContext(ctx) + logger.V(4).Info("Updating PersistentVolume: rolling back binding from claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name) // Save the PV only when any modification is necessary. 
volumeClone := volume.DeepCopy() @@ -1126,26 +1142,27 @@ func (ctrl *PersistentVolumeController) unbindVolume(volume *v1.PersistentVolume volumeClone.Spec.ClaimRef.UID = "" } - newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(context.TODO(), volumeClone, metav1.UpdateOptions{}) + newVol, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Update(ctx, volumeClone, metav1.UpdateOptions{}) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: rollback failed: %v", volume.Name, err) + logger.V(4).Info("Updating PersistentVolume: rollback failed", "volumeName", volume.Name, "err", err) return err } - _, err = ctrl.storeVolumeUpdate(newVol) + _, err = ctrl.storeVolumeUpdate(logger, newVol) if err != nil { - klog.V(4).Infof("updating PersistentVolume[%s]: cannot update internal cache: %v", volume.Name, err) + logger.V(4).Info("Updating PersistentVolume: cannot update internal cache", "volumeName", volume.Name, "err", err) return err } - klog.V(4).Infof("updating PersistentVolume[%s]: rolled back", newVol.Name) + logger.V(4).Info("Updating PersistentVolume: rolled back", "volumeName", newVol.Name) // Update the status - _, err = ctrl.updateVolumePhase(newVol, v1.VolumeAvailable, "") + _, err = ctrl.updateVolumePhase(ctx, newVol, v1.VolumeAvailable, "") return err } // reclaimVolume implements volume.Spec.PersistentVolumeReclaimPolicy and // starts appropriate reclaim action. -func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolume) error { +func (ctrl *PersistentVolumeController) reclaimVolume(ctx context.Context, volume *v1.PersistentVolume) error { + logger := klog.FromContext(ctx) if migrated := volume.Annotations[storagehelpers.AnnMigratedTo]; len(migrated) > 0 { // PV is Migrated. 
The PV controller should stand down and the external // provisioner will handle this PV @@ -1153,24 +1170,24 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum } switch volume.Spec.PersistentVolumeReclaimPolicy { case v1.PersistentVolumeReclaimRetain: - klog.V(4).Infof("reclaimVolume[%s]: policy is Retain, nothing to do", volume.Name) + logger.V(4).Info("ReclaimVolume: policy is Retain, nothing to do", "volumeName", volume.Name) case v1.PersistentVolumeReclaimRecycle: - klog.V(4).Infof("reclaimVolume[%s]: policy is Recycle", volume.Name) + logger.V(4).Info("ReclaimVolume: policy is Recycle", "volumeName", volume.Name) opName := fmt.Sprintf("recycle-%s[%s]", volume.Name, string(volume.UID)) - ctrl.scheduleOperation(opName, func() error { - ctrl.recycleVolumeOperation(volume) + ctrl.scheduleOperation(logger, opName, func() error { + ctrl.recycleVolumeOperation(ctx, volume) return nil }) case v1.PersistentVolumeReclaimDelete: - klog.V(4).Infof("reclaimVolume[%s]: policy is Delete", volume.Name) + logger.V(4).Info("ReclaimVolume: policy is Delete", "volumeName", volume.Name) opName := fmt.Sprintf("delete-%s[%s]", volume.Name, string(volume.UID)) // create a start timestamp entry in cache for deletion operation if no one exists with // key = volume.Name, pluginName = provisionerName, operation = "delete" ctrl.operationTimestamps.AddIfNotExist(volume.Name, ctrl.getProvisionerNameFromVolume(volume), "delete") - ctrl.scheduleOperation(opName, func() error { - _, err := ctrl.deleteVolumeOperation(volume) + ctrl.scheduleOperation(logger, opName, func() error { + _, err := ctrl.deleteVolumeOperation(ctx, volume) if err != nil { // only report error count to "volume_operation_total_errors" // latency reporting will happen when the volume get finally @@ -1182,7 +1199,7 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum default: // Unknown PersistentVolumeReclaimPolicy - if _, err := 
ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy"); err != nil { + if _, err := ctrl.updateVolumePhaseWithEvent(ctx, volume, v1.VolumeFailed, v1.EventTypeWarning, "VolumeUnknownReclaimPolicy", "Volume has unrecognized PersistentVolumeReclaimPolicy"); err != nil { return err } } @@ -1191,29 +1208,30 @@ func (ctrl *PersistentVolumeController) reclaimVolume(volume *v1.PersistentVolum // recycleVolumeOperation recycles a volume. This method is running in // standalone goroutine and already has all necessary locks. -func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.PersistentVolume) { - klog.V(4).Infof("recycleVolumeOperation [%s] started", volume.Name) +func (ctrl *PersistentVolumeController) recycleVolumeOperation(ctx context.Context, volume *v1.PersistentVolume) { + logger := klog.FromContext(ctx) + logger.V(4).Info("RecycleVolumeOperation started", "volumeName", volume.Name) // This method may have been waiting for a volume lock for some time. // Previous recycleVolumeOperation might just have saved an updated version, // so read current volume state now. 
- newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, volume.Name, metav1.GetOptions{}) if err != nil { - klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + logger.V(3).Info("Error reading persistent volume", "volumeName", volume.Name, "err", err) return } - needsReclaim, err := ctrl.isVolumeReleased(newVolume) + needsReclaim, err := ctrl.isVolumeReleased(logger, newVolume) if err != nil { - klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + logger.V(3).Info("Error reading claim for volume", "volumeName", volume.Name, "err", err) return } if !needsReclaim { - klog.V(3).Infof("volume %q no longer needs recycling, skipping", volume.Name) + logger.V(3).Info("Volume no longer needs recycling, skipping", "volumeName", volume.Name) return } pods, used, err := ctrl.isVolumeUsed(newVolume) if err != nil { - klog.V(3).Infof("can't recycle volume %q: %v", volume.Name, err) + logger.V(3).Info("Can't recycle volume", "volumeName", volume.Name, "err", err) return } @@ -1224,13 +1242,13 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis claimName := claimrefToClaimKey(volume.Spec.ClaimRef) _, claimCached, err := ctrl.claims.GetByKey(claimName) if err != nil { - klog.V(3).Infof("error getting the claim %s from cache", claimName) + logger.V(3).Info("Error getting the claim from cache", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)) return } if used && !claimCached { msg := fmt.Sprintf("Volume is used by pods: %s", strings.Join(pods, ",")) - klog.V(3).Infof("can't recycle volume %q: %s", volume.Name, msg) + logger.V(3).Info("Can't recycle volume", "volumeName", volume.Name, "msg", msg) ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeFailedRecycle, msg) return } @@ -1244,8 +1262,8 @@ func (ctrl 
*PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis plugin, err := ctrl.volumePluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { // No recycler found. Emit an event and mark the volume Failed. - if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, "No recycler plugin found for the volume!"); err != nil { - klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + if _, err = ctrl.updateVolumePhaseWithEvent(ctx, volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, "No recycler plugin found for the volume!"); err != nil { + logger.V(4).Info("RecycleVolumeOperation: failed to mark volume as failed", "volumeName", volume.Name, "err", err) // Save failed, retry on the next deletion attempt return } @@ -1260,8 +1278,8 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis if err = plugin.Recycle(volume.Name, spec, recorder); err != nil { // Recycler failed strerr := fmt.Sprintf("Recycle failed: %s", err) - if _, err = ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, strerr); err != nil { - klog.V(4).Infof("recycleVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + if _, err = ctrl.updateVolumePhaseWithEvent(ctx, volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedRecycle, strerr); err != nil { + logger.V(4).Info("RecycleVolumeOperation: failed to mark volume as failed", "volumeName", volume.Name, "err", err) // Save failed, retry on the next deletion attempt return } @@ -1270,54 +1288,55 @@ func (ctrl *PersistentVolumeController) recycleVolumeOperation(volume *v1.Persis return } - klog.V(2).Infof("volume %q recycled", volume.Name) + logger.V(2).Info("Volume recycled", "volumeName", volume.Name) // Send an event ctrl.eventRecorder.Event(volume, v1.EventTypeNormal, events.VolumeRecycled, 
"Volume recycled") // Make the volume available again - if err = ctrl.unbindVolume(volume); err != nil { + if err = ctrl.unbindVolume(ctx, volume); err != nil { // Oops, could not save the volume and therefore the controller will // recycle the volume again on next update. We _could_ maintain a cache // of "recently recycled volumes" and avoid unnecessary recycling, this // is left out as future optimization. - klog.V(3).Infof("recycleVolumeOperation [%s]: failed to make recycled volume 'Available' (%v), we will recycle the volume again", volume.Name, err) + logger.V(3).Info("RecycleVolumeOperation: failed to make recycled volume 'Available', we will recycle the volume again", "volumeName", volume.Name, "err", err) return } } // deleteVolumeOperation deletes a volume. This method is running in standalone // goroutine and already has all necessary locks. -func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.PersistentVolume) (string, error) { - klog.V(4).Infof("deleteVolumeOperation [%s] started", volume.Name) +func (ctrl *PersistentVolumeController) deleteVolumeOperation(ctx context.Context, volume *v1.PersistentVolume) (string, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("DeleteVolumeOperation started", "volumeName", volume.Name) // This method may have been waiting for a volume lock for some time. // Previous deleteVolumeOperation might just have saved an updated version, so // read current volume state now. 
- newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), volume.Name, metav1.GetOptions{}) + newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, volume.Name, metav1.GetOptions{}) if err != nil { - klog.V(3).Infof("error reading persistent volume %q: %v", volume.Name, err) + logger.V(3).Info("Error reading persistent volume", "volumeName", volume.Name, "err", err) return "", nil } if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) { if newVolume.GetDeletionTimestamp() != nil { - klog.V(3).Infof("Volume %q is already being deleted", volume.Name) + logger.V(3).Info("Volume is already being deleted", "volumeName", volume.Name) return "", nil } } - needsReclaim, err := ctrl.isVolumeReleased(newVolume) + needsReclaim, err := ctrl.isVolumeReleased(logger, newVolume) if err != nil { - klog.V(3).Infof("error reading claim for volume %q: %v", volume.Name, err) + logger.V(3).Info("Error reading claim for volume", "volumeName", volume.Name, "err", err) return "", nil } if !needsReclaim { - klog.V(3).Infof("volume %q no longer needs deletion, skipping", volume.Name) + logger.V(3).Info("Volume no longer needs deletion, skipping", "volumeName", volume.Name) return "", nil } - pluginName, deleted, err := ctrl.doDeleteVolume(volume) + pluginName, deleted, err := ctrl.doDeleteVolume(ctx, volume) if err != nil { // Delete failed, update the volume and emit an event. 
- klog.V(3).Infof("deletion of volume %q failed: %v", volume.Name, err) + logger.V(3).Info("Deletion of volume failed", "volumeName", volume.Name, "err", err) if volerr.IsDeletedVolumeInUse(err) { // The plugin needs more time, don't mark the volume as Failed // and send Normal event only @@ -1325,8 +1344,8 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist } else { // The plugin failed, mark the volume as Failed and send Warning // event - if _, err := ctrl.updateVolumePhaseWithEvent(volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil { - klog.V(4).Infof("deleteVolumeOperation [%s]: failed to mark volume as failed: %v", volume.Name, err) + if _, err := ctrl.updateVolumePhaseWithEvent(ctx, volume, v1.VolumeFailed, v1.EventTypeWarning, events.VolumeFailedDelete, err.Error()); err != nil { + logger.V(4).Info("DeleteVolumeOperation: failed to mark volume as failed", "volumeName", volume.Name, "err", err) // Save failed, retry on the next deletion attempt return pluginName, err } @@ -1341,14 +1360,14 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist return pluginName, nil } - klog.V(4).Infof("deleteVolumeOperation [%s]: success", volume.Name) + logger.V(4).Info("DeleteVolumeOperation: success", "volumeName", volume.Name) // Delete the volume - if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(context.TODO(), volume.Name, metav1.DeleteOptions{}); err != nil { + if err = ctrl.kubeClient.CoreV1().PersistentVolumes().Delete(ctx, volume.Name, metav1.DeleteOptions{}); err != nil { // Oops, could not delete the volume and therefore the controller will // try to delete the volume again on next update. We _could_ maintain a // cache of "recently deleted volumes" and avoid unnecessary deletion, // this is left out as future optimization. 
- klog.V(3).Infof("failed to delete volume %q from database: %v", volume.Name, err) + logger.V(3).Info("Failed to delete volume from database", "volumeName", volume.Name, "err", err) return pluginName, nil } return pluginName, nil @@ -1357,17 +1376,17 @@ func (ctrl *PersistentVolumeController) deleteVolumeOperation(volume *v1.Persist // isVolumeReleased returns true if given volume is released and can be recycled // or deleted, based on its retain policy. I.e. the volume is bound to a claim // and the claim does not exist or exists and is bound to different volume. -func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVolume) (bool, error) { +func (ctrl *PersistentVolumeController) isVolumeReleased(logger klog.Logger, volume *v1.PersistentVolume) (bool, error) { // A volume needs reclaim if it has ClaimRef and appropriate claim does not // exist. if volume.Spec.ClaimRef == nil { - klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is nil", volume.Name) + logger.V(4).Info("isVolumeReleased: ClaimRef is nil", "volumeName", volume.Name) return false, nil } if volume.Spec.ClaimRef.UID == "" { // This is a volume bound by user and the controller has not finished // binding to the real claim yet. 
- klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is not bound", volume.Name) + logger.V(4).Info("isVolumeReleased: ClaimRef is not bound", "volumeName", volume.Name) return false, nil } @@ -1394,11 +1413,11 @@ func (ctrl *PersistentVolumeController) isVolumeReleased(volume *v1.PersistentVo return true, nil } - klog.V(4).Infof("isVolumeReleased[%s]: ClaimRef is still valid, volume is not released", volume.Name) + logger.V(4).Info("isVolumeReleased: ClaimRef is still valid, volume is not released", "volumeName", volume.Name) return false, nil } - klog.V(2).Infof("isVolumeReleased[%s]: volume is released", volume.Name) + logger.V(2).Info("isVolumeReleased: volume is released", "volumeName", volume.Name) return true, nil } @@ -1461,8 +1480,9 @@ func (ctrl *PersistentVolumeController) findNonScheduledPodsByPVC(pvc *v1.Persis // the volume plugin name. Also, it returns 'true', when the volume was deleted and // 'false' when the volume cannot be deleted because the deleter is external. No // error should be reported in this case. 
-func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolume) (string, bool, error) { - klog.V(4).Infof("doDeleteVolume [%s]", volume.Name) +func (ctrl *PersistentVolumeController) doDeleteVolume(ctx context.Context, volume *v1.PersistentVolume) (string, bool, error) { + logger := klog.FromContext(ctx) + logger.V(4).Info("doDeleteVolume", "volumeName", volume.Name) var err error plugin, err := ctrl.findDeletablePlugin(volume) @@ -1471,15 +1491,15 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu } if plugin == nil { // External deleter is requested, do nothing - klog.V(3).Infof("external deleter for volume %q requested, ignoring", volume.Name) + logger.V(3).Info("External deleter for volume requested, ignoring", "volumeName", volume.Name) return "", false, nil } // Plugin found pluginName := plugin.GetPluginName() - klog.V(5).Infof("found a deleter plugin %q for volume %q", pluginName, volume.Name) + logger.V(5).Info("Found a deleter plugin for volume", "pluginName", pluginName, "volumeName", volume.Name) spec := vol.NewSpecFromPersistentVolume(volume, false) - deleter, err := plugin.NewDeleter(spec) + deleter, err := plugin.NewDeleter(logger, spec) if err != nil { // Cannot create deleter return pluginName, false, fmt.Errorf("failed to create deleter for volume %q: %w", volume.Name, err) @@ -1492,10 +1512,10 @@ func (ctrl *PersistentVolumeController) doDeleteVolume(volume *v1.PersistentVolu // Deleter failed return pluginName, false, err } - klog.V(2).Infof("volume %q deleted", volume.Name) + logger.V(2).Info("Volume deleted", "volumeName", volume.Name) // Remove in-tree delete finalizer on the PV as the volume has been deleted from the underlying storage if utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) { - err = ctrl.removeDeletionProtectionFinalizer(context.TODO(), volume) + err = ctrl.removeDeletionProtectionFinalizer(ctx, volume) if err != nil { return pluginName, true, err } @@ 
-1507,9 +1527,10 @@ func (ctrl *PersistentVolumeController) removeDeletionProtectionFinalizer(ctx co var err error pvUpdateNeeded := false // Retrieve latest version + logger := klog.FromContext(ctx) newVolume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, volume.Name, metav1.GetOptions{}) if err != nil { - klog.Errorf("error reading persistent volume %q: %v", volume.Name, err) + logger.Error(err, "Error reading persistent volume", "volumeName", volume.Name) return err } volume = newVolume @@ -1525,11 +1546,11 @@ func (ctrl *PersistentVolumeController) removeDeletionProtectionFinalizer(ctx co if err != nil { return fmt.Errorf("persistent volume controller can't update finalizer: %v", err) } - _, err = ctrl.storeVolumeUpdate(volumeClone) + _, err = ctrl.storeVolumeUpdate(logger, volumeClone) if err != nil { return fmt.Errorf("persistent Volume Controller can't anneal migration finalizer: %v", err) } - klog.V(2).Infof("PV in-tree protection finalizer removed from volume: %q", volume.Name) + logger.V(2).Info("PV in-tree protection finalizer removed from volume", "volumeName", volume.Name) } return nil } @@ -1540,19 +1561,20 @@ func (ctrl *PersistentVolumeController) provisionClaim(ctx context.Context, clai if !ctrl.enableDynamicProvisioning { return nil } - klog.V(4).Infof("provisionClaim[%s]: started", claimToClaimKey(claim)) + logger := klog.FromContext(ctx) + logger.V(4).Info("provisionClaim: started", "PVC", klog.KObj(claim)) opName := fmt.Sprintf("provision-%s[%s]", claimToClaimKey(claim), string(claim.UID)) plugin, storageClass, err := ctrl.findProvisionablePlugin(claim) // findProvisionablePlugin does not return err for external provisioners if err != nil { ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, err.Error()) - klog.Errorf("error finding provisioning plugin for claim %s: %v", claimToClaimKey(claim), err) + logger.Error(err, "Error finding provisioning plugin for claim", "PVC", klog.KObj(claim)) // failed to 
find the requested provisioning plugin, directly return err for now. // controller will retry the provisioning in every syncUnboundClaim() call // retain the original behavior of returning nil from provisionClaim call return nil } - ctrl.scheduleOperation(opName, func() error { + ctrl.scheduleOperation(logger, opName, func() error { // create a start timestamp entry in cache for provision operation if no one exists with // key = claimKey, pluginName = provisionerName, operation = "provision" claimKey := claimToClaimKey(claim) @@ -1581,7 +1603,8 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( plugin vol.ProvisionableVolumePlugin, storageClass *storage.StorageClass) (string, error) { claimClass := storagehelpers.GetPersistentVolumeClaimClass(claim) - klog.V(4).Infof("provisionClaimOperation [%s] started, class: %q", claimToClaimKey(claim), claimClass) + logger := klog.FromContext(ctx) + logger.V(4).Info("provisionClaimOperation started", "PVC", klog.KObj(claim), "storageClassName", claimClass) // called from provisionClaim(), in this case, plugin MUST NOT be nil // NOTE: checks on plugin/storageClass has been saved @@ -1590,19 +1613,19 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // Only CSI plugin can have a DataSource. Fail the operation // if Datasource in Claim is not nil and it is not a CSI plugin, strerr := fmt.Sprintf("plugin %q is not a CSI plugin. 
Only CSI plugin can provision a claim with a datasource", pluginName) - klog.V(2).Infof(strerr) + logger.V(2).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, fmt.Errorf(strerr) } provisionerName := storageClass.Provisioner - klog.V(4).Infof("provisionClaimOperation [%s]: plugin name: %s, provisioner name: %s", claimToClaimKey(claim), pluginName, provisionerName) + logger.V(4).Info("provisionClaimOperation", "PVC", klog.KObj(claim), "pluginName", pluginName, "provisionerName", provisionerName) // Add provisioner annotation to be consistent with external provisioner workflow newClaim, err := ctrl.setClaimProvisioner(ctx, claim, provisionerName) if err != nil { // Save failed, the controller will retry in the next sync - klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err) + logger.V(2).Info("Error saving claim", "PVC", klog.KObj(claim), "err", err) return pluginName, err } claim = newClaim @@ -1614,14 +1637,14 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // yet. pvName := ctrl.getProvisionedVolumeNameForClaim(claim) - volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) + volume, err := ctrl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) if err != nil && !apierrors.IsNotFound(err) { - klog.V(3).Infof("error reading persistent volume %q: %v", pvName, err) + logger.V(3).Info("Error reading persistent volume", "PV", klog.KRef("", pvName), "err", err) return pluginName, err } if err == nil && volume != nil { // Volume has been already provisioned, nothing to do. 
- klog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) + logger.V(4).Info("provisionClaimOperation: volume already exists, skipping", "PVC", klog.KObj(claim)) return pluginName, err } @@ -1629,7 +1652,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // provisioned) claimRef, err := ref.GetReference(scheme.Scheme, claim) if err != nil { - klog.V(3).Infof("unexpected error getting claim reference: %v", err) + logger.V(3).Info("Unexpected error getting claim reference", "err", err) return pluginName, err } @@ -1653,16 +1676,16 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // of PV would be rejected by validation anyway if !plugin.SupportsMountOption() && len(options.MountOptions) > 0 { strerr := fmt.Sprintf("Mount options are not supported by the provisioner but StorageClass %q has mount options %v", storageClass.Name, options.MountOptions) - klog.V(2).Infof("Mount options are not supported by the provisioner but claim %q's StorageClass %q has mount options %v", claimToClaimKey(claim), storageClass.Name, options.MountOptions) + logger.V(2).Info("Mount options are not supported by the provisioner but claim's StorageClass has mount options", "PVC", klog.KObj(claim), "storageClassName", storageClass.Name, "options", options.MountOptions) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, fmt.Errorf("provisioner %q doesn't support mount options", plugin.GetPluginName()) } // Provision the volume - provisioner, err := plugin.NewProvisioner(options) + provisioner, err := plugin.NewProvisioner(logger, options) if err != nil { strerr := fmt.Sprintf("Failed to create provisioner: %v", err) - klog.V(2).Infof("failed to create provisioner for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) + logger.V(2).Info("Failed to create provisioner for claim with StorageClass", "PVC", klog.KObj(claim), 
"storageClassName", storageClass.Name, "err", err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } @@ -1672,7 +1695,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( selectedNode, err = ctrl.NodeLister.Get(nodeName) if err != nil { strerr := fmt.Sprintf("Failed to get target node: %v", err) - klog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err) + logger.V(3).Info("Unexpected error getting target node for claim", "node", klog.KRef("", nodeName), "PVC", klog.KObj(claim), "err", err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } @@ -1686,15 +1709,15 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // Other places of failure have nothing to do with VolumeScheduling, // so just let controller retry in the next sync. We'll only call func // rescheduleProvisioning here when the underlying provisioning actually failed. - ctrl.rescheduleProvisioning(claim) + ctrl.rescheduleProvisioning(ctx, claim) strerr := fmt.Sprintf("Failed to provision volume with StorageClass %q: %v", storageClass.Name, err) - klog.V(2).Infof("failed to provision volume for claim %q with StorageClass %q: %v", claimToClaimKey(claim), storageClass.Name, err) + logger.V(2).Info("Failed to provision volume for claim with StorageClass", "PVC", klog.KObj(claim), "storageClassName", storageClass.Name, "err", err) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return pluginName, err } - klog.V(3).Infof("volume %q for claim %q created", volume.Name, claimToClaimKey(claim)) + logger.V(3).Info("Volume for claim created", "PVC", klog.KObj(claim), "volumeName", volume.Name) // Create Kubernetes PV object for the volume. 
if volume.Name == "" { @@ -1718,26 +1741,26 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // Try to create the PV object several times for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { - klog.V(4).Infof("provisionClaimOperation [%s]: trying to save volume %s", claimToClaimKey(claim), volume.Name) + logger.V(4).Info("provisionClaimOperation: trying to save volume", "PVC", klog.KObj(claim), "volumeName", volume.Name) var newVol *v1.PersistentVolume - if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(context.TODO(), volume, metav1.CreateOptions{}); err == nil || apierrors.IsAlreadyExists(err) { + if newVol, err = ctrl.kubeClient.CoreV1().PersistentVolumes().Create(ctx, volume, metav1.CreateOptions{}); err == nil || apierrors.IsAlreadyExists(err) { // Save succeeded. if err != nil { - klog.V(3).Infof("volume %q for claim %q already exists, reusing", volume.Name, claimToClaimKey(claim)) + logger.V(3).Info("Volume for claim already exists, reusing", "PVC", klog.KObj(claim), "volumeName", volume.Name) err = nil } else { - klog.V(3).Infof("volume %q for claim %q saved", volume.Name, claimToClaimKey(claim)) + logger.V(3).Info("Volume for claim saved", "PVC", klog.KObj(claim), "volumeName", volume.Name) - _, updateErr := ctrl.storeVolumeUpdate(newVol) + _, updateErr := ctrl.storeVolumeUpdate(logger, newVol) if updateErr != nil { // We will get an "volume added" event soon, this is not a big error - klog.V(4).Infof("provisionClaimOperation [%s]: cannot update internal cache: %v", volume.Name, updateErr) + logger.V(4).Info("provisionClaimOperation: cannot update internal cache", "volumeName", volume.Name, "err", updateErr) } } break } // Save failed, try again after a while. 
- klog.V(3).Infof("failed to save volume %q for claim %q: %v", volume.Name, claimToClaimKey(claim), err) + logger.V(3).Info("Failed to save volume for claim", "PVC", klog.KObj(claim), "volumeName", volume.Name, "err", err) time.Sleep(ctrl.createProvisionedPVInterval) } @@ -1747,27 +1770,27 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // Emit some event here and try to delete the storage asset several // times. strerr := fmt.Sprintf("Error creating provisioned PV object for claim %s: %v. Deleting the volume.", claimToClaimKey(claim), err) - klog.V(3).Info(strerr) + logger.V(3).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) var deleteErr error var deleted bool for i := 0; i < ctrl.createProvisionedPVRetryCount; i++ { - _, deleted, deleteErr = ctrl.doDeleteVolume(volume) + _, deleted, deleteErr = ctrl.doDeleteVolume(ctx, volume) if deleteErr == nil && deleted { // Delete succeeded - klog.V(4).Infof("provisionClaimOperation [%s]: cleaning volume %s succeeded", claimToClaimKey(claim), volume.Name) + logger.V(4).Info("provisionClaimOperation: cleaning volume succeeded", "PVC", klog.KObj(claim), "volumeName", volume.Name) break } if !deleted { // This is unreachable code, the volume was provisioned by an // internal plugin and therefore there MUST be an internal // plugin that deletes it. - klog.Errorf("Error finding internal deleter for volume plugin %q", plugin.GetPluginName()) + logger.Error(nil, "Error finding internal deleter for volume plugin", "plugin", plugin.GetPluginName()) break } // Delete failed, try again after a while. - klog.V(3).Infof("failed to delete volume %q: %v", volume.Name, deleteErr) + logger.V(3).Info("Failed to delete volume", "volumeName", volume.Name, "err", deleteErr) time.Sleep(ctrl.createProvisionedPVInterval) } @@ -1775,11 +1798,11 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation( // Delete failed several times. 
There is an orphaned volume and there // is nothing we can do about it. strerr := fmt.Sprintf("Error cleaning provisioned volume for claim %s: %v. Please delete manually.", claimToClaimKey(claim), deleteErr) - klog.V(2).Info(strerr) + logger.V(2).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningCleanupFailed, strerr) } } else { - klog.V(2).Infof("volume %q provisioned for claim %q", volume.Name, claimToClaimKey(claim)) + logger.V(2).Info("Volume provisioned for claim", "PVC", klog.KObj(claim), "volumeName", volume.Name) msg := fmt.Sprintf("Successfully provisioned volume %s using %s", volume.Name, plugin.GetPluginName()) ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ProvisioningSucceeded, msg) } @@ -1793,7 +1816,8 @@ func (ctrl *PersistentVolumeController) provisionClaimOperationExternal( claim *v1.PersistentVolumeClaim, storageClass *storage.StorageClass) (string, error) { claimClass := storagehelpers.GetPersistentVolumeClaimClass(claim) - klog.V(4).Infof("provisionClaimOperationExternal [%s] started, class: %q", claimToClaimKey(claim), claimClass) + logger := klog.FromContext(ctx) + logger.V(4).Info("provisionClaimOperationExternal started", "PVC", klog.KObj(claim), "storageClassName", claimClass) // Set provisionerName to external provisioner name by setClaimProvisioner var err error provisionerName := storageClass.Provisioner @@ -1802,7 +1826,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperationExternal( provisionerName, err = ctrl.translator.GetCSINameFromInTreeName(storageClass.Provisioner) if err != nil { strerr := fmt.Sprintf("error getting CSI name for In tree plugin %s: %v", storageClass.Provisioner, err) - klog.V(2).Infof("%s", strerr) + logger.V(2).Info(strerr) ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr) return provisionerName, err } @@ -1811,7 +1835,7 @@ func (ctrl *PersistentVolumeController) provisionClaimOperationExternal( newClaim, err := 
ctrl.setClaimProvisioner(ctx, claim, provisionerName) if err != nil { // Save failed, the controller will retry in the next sync - klog.V(2).Infof("error saving claim %s: %v", claimToClaimKey(claim), err) + logger.V(2).Info("Error saving claim", "PVC", klog.KObj(claim), "err", err) return provisionerName, err } claim = newClaim @@ -1819,14 +1843,14 @@ func (ctrl *PersistentVolumeController) provisionClaimOperationExternal( // External provisioner has been requested for provisioning the volume // Report an event and wait for external provisioner to finish ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, events.ExternalProvisioning, msg) - klog.V(3).Infof("provisionClaimOperationExternal provisioning claim %q: %s", claimToClaimKey(claim), msg) + logger.V(3).Info("provisionClaimOperationExternal provisioning claim", "PVC", klog.KObj(claim), "msg", msg) // return provisioner name here for metric reporting return provisionerName, nil } // rescheduleProvisioning signal back to the scheduler to retry dynamic provisioning // by removing the AnnSelectedNode annotation -func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.PersistentVolumeClaim) { +func (ctrl *PersistentVolumeController) rescheduleProvisioning(ctx context.Context, claim *v1.PersistentVolumeClaim) { if _, ok := claim.Annotations[storagehelpers.AnnSelectedNode]; !ok { // Provisioning not triggered by the scheduler, skip return @@ -1837,13 +1861,14 @@ func (ctrl *PersistentVolumeController) rescheduleProvisioning(claim *v1.Persist newClaim := claim.DeepCopy() delete(newClaim.Annotations, storagehelpers.AnnSelectedNode) // Try to update the PVC object - if _, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(context.TODO(), newClaim, metav1.UpdateOptions{}); err != nil { - klog.V(4).Infof("Failed to delete annotation 'storagehelpers.AnnSelectedNode' for PersistentVolumeClaim %q: %v", claimToClaimKey(newClaim), err) + logger := klog.FromContext(ctx) + if _, 
err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(newClaim.Namespace).Update(ctx, newClaim, metav1.UpdateOptions{}); err != nil { + logger.V(4).Info("Failed to delete annotation 'storagehelpers.AnnSelectedNode' for PersistentVolumeClaim", "PVC", klog.KObj(newClaim), "err", err) return } - if _, err := ctrl.storeClaimUpdate(newClaim); err != nil { + if _, err := ctrl.storeClaimUpdate(logger, newClaim); err != nil { // We will get an "claim updated" event soon, this is not a big error - klog.V(4).Infof("Updating PersistentVolumeClaim %q: cannot update internal cache: %v", claimToClaimKey(newClaim), err) + logger.V(4).Info("Updating PersistentVolumeClaim: cannot update internal cache", "PVC", klog.KObj(newClaim), "err", err) } } @@ -1855,8 +1880,8 @@ func (ctrl *PersistentVolumeController) getProvisionedVolumeNameForClaim(claim * // scheduleOperation starts given asynchronous operation on given volume. It // makes sure the operation is already not running. -func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, operation func() error) { - klog.V(4).Infof("scheduleOperation[%s]", operationName) +func (ctrl *PersistentVolumeController) scheduleOperation(logger klog.Logger, operationName string, operation func() error) { + logger.V(4).Info("scheduleOperation", "operationName", operationName) // Poke test code that an operation is just about to get started. 
if ctrl.preOperationHook != nil { @@ -1867,11 +1892,11 @@ func (ctrl *PersistentVolumeController) scheduleOperation(operationName string, if err != nil { switch { case goroutinemap.IsAlreadyExists(err): - klog.V(4).Infof("operation %q is already running, skipping", operationName) + logger.V(4).Info("Operation is already running, skipping", "operationName", operationName) case exponentialbackoff.IsExponentialBackoff(err): - klog.V(4).Infof("operation %q postponed due to exponential backoff", operationName) + logger.V(4).Info("Operation postponed due to exponential backoff", "operationName", operationName) default: - klog.Errorf("error scheduling operation %q: %v", operationName, err) + logger.Error(err, "Error scheduling operation", "operationName", operationName) } } } diff --git a/pkg/controller/volume/persistentvolume/pv_controller_base.go b/pkg/controller/volume/persistentvolume/pv_controller_base.go index 336203fd004..7e3827d124d 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_base.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_base.go @@ -79,7 +79,7 @@ type ControllerParameters struct { } // NewController creates a new PersistentVolume controller -func NewController(p ControllerParameters) (*PersistentVolumeController, error) { +func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) { eventRecorder := p.EventRecorder var eventBroadcaster record.EventBroadcaster if eventRecorder == nil { @@ -112,9 +112,9 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) p.VolumeInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) }, - DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) }, + AddFunc: func(obj interface{}) { 
controller.enqueueWork(ctx, controller.volumeQueue, obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, newObj) }, + DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) }, }, ) controller.volumeLister = p.VolumeInformer.Lister() @@ -122,9 +122,9 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) p.ClaimInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) }, - UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) }, - DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) }, + AddFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) }, + UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, newObj) }, + DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) }, }, ) controller.claimLister = p.ClaimInformer.Lister() @@ -156,53 +156,54 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error) // initializeCaches fills all controller caches with initial data from etcd in // order to have the caches already filled when first addClaim/addVolume to // perform initial synchronization of the controller. 
-func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) { +func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) { volumeList, err := volumeLister.List(labels.Everything()) if err != nil { - klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) + logger.Error(err, "PersistentVolumeController can't initialize caches") return } for _, volume := range volumeList { volumeClone := volume.DeepCopy() - if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil { - klog.Errorf("error updating volume cache: %v", err) + if _, err = ctrl.storeVolumeUpdate(logger, volumeClone); err != nil { + logger.Error(err, "Error updating volume cache") } } claimList, err := claimLister.List(labels.Everything()) if err != nil { - klog.Errorf("PersistentVolumeController can't initialize caches: %v", err) + logger.Error(err, "PersistentVolumeController can't initialize caches") return } for _, claim := range claimList { - if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil { - klog.Errorf("error updating claim cache: %v", err) + if _, err = ctrl.storeClaimUpdate(logger, claim.DeepCopy()); err != nil { + logger.Error(err, "Error updating claim cache") } } - klog.V(4).Infof("controller initialized") + logger.V(4).Info("Controller initialized") } // enqueueWork adds volume or claim to given work queue. 
-func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, obj interface{}) { +func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.Interface, obj interface{}) { // Beware of "xxx deleted" events + logger := klog.FromContext(ctx) if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil { obj = unknown.Obj } objName, err := controller.KeyFunc(obj) if err != nil { - klog.Errorf("failed to get key from object: %v", err) + logger.Error(err, "Failed to get key from object") return } - klog.V(5).Infof("enqueued %q for sync", objName) + logger.V(5).Info("Enqueued for sync", "objName", objName) queue.Add(objName) } -func (ctrl *PersistentVolumeController) storeVolumeUpdate(volume interface{}) (bool, error) { - return storeObjectUpdate(ctrl.volumes.store, volume, "volume") +func (ctrl *PersistentVolumeController) storeVolumeUpdate(logger klog.Logger, volume interface{}) (bool, error) { + return storeObjectUpdate(logger, ctrl.volumes.store, volume, "volume") } -func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (bool, error) { - return storeObjectUpdate(ctrl.claims, claim, "claim") +func (ctrl *PersistentVolumeController) storeClaimUpdate(logger klog.Logger, claim interface{}) (bool, error) { + return storeObjectUpdate(logger, ctrl.claims, claim, "claim") } // updateVolume runs in worker thread and handles "volume added", @@ -210,9 +211,10 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (boo func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) { // Store the new volume version in the cache and do not process it if this // is an old version. 
- new, err := ctrl.storeVolumeUpdate(volume) + logger := klog.FromContext(ctx) + new, err := ctrl.storeVolumeUpdate(logger, volume) if err != nil { - klog.Errorf("%v", err) + logger.Error(err, "") } if !new { return @@ -223,19 +225,20 @@ func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. - klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err) + logger.V(3).Info("Could not sync volume", "volumeName", volume.Name, "err", err) } else { - klog.Errorf("could not sync volume %q: %+v", volume.Name, err) + logger.Error(err, "Could not sync volume", "volumeName", volume.Name, "err", err) } } } // deleteVolume runs in worker thread and handles "volume deleted" event. -func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) { +func (ctrl *PersistentVolumeController) deleteVolume(ctx context.Context, volume *v1.PersistentVolume) { + logger := klog.FromContext(ctx) if err := ctrl.volumes.store.Delete(volume); err != nil { - klog.Errorf("volume %q deletion encountered : %v", volume.Name, err) + logger.Error(err, "Volume deletion encountered", "volumeName", volume.Name) } else { - klog.V(4).Infof("volume %q deleted", volume.Name) + logger.V(4).Info("volume deleted", "volumeName", volume.Name) } // record deletion metric if a deletion start timestamp is in the cache // the following calls will be a no-op if there is nothing for this volume in the cache @@ -249,7 +252,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume // claim here in response to volume deletion prevents the claim from // waiting until the next sync period for its Lost status. 
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef) - klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey) + logger.V(5).Info("deleteVolume: scheduling sync of claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name) ctrl.claimQueue.Add(claimKey) } @@ -258,9 +261,10 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) { // Store the new claim version in the cache and do not process it if this is // an old version. - new, err := ctrl.storeClaimUpdate(claim) + logger := klog.FromContext(ctx) + new, err := ctrl.storeClaimUpdate(logger, claim) if err != nil { - klog.Errorf("%v", err) + logger.Error(err, "") } if !new { return @@ -270,35 +274,36 @@ func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim * if errors.IsConflict(err) { // Version conflict error happens quite often and the controller // recovers from it easily. - klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err) + logger.V(3).Info("Could not sync claim", "PVC", klog.KObj(claim), "err", err) } else { - klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err) + logger.Error(err, "Could not sync volume", "PVC", klog.KObj(claim)) } } } // Unit test [5-5] [5-6] [5-7] // deleteClaim runs in worker thread and handles "claim deleted" event. 
-func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) { +func (ctrl *PersistentVolumeController) deleteClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) { + logger := klog.FromContext(ctx) if err := ctrl.claims.Delete(claim); err != nil { - klog.Errorf("claim %q deletion encountered : %v", claim.Name, err) + logger.Error(err, "Claim deletion encountered", "PVC", klog.KObj(claim)) } claimKey := claimToClaimKey(claim) - klog.V(4).Infof("claim %q deleted", claimKey) + logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim)) // clean any possible unfinished provision start timestamp from cache // Unit test [5-8] [5-9] ctrl.operationTimestamps.Delete(claimKey) volumeName := claim.Spec.VolumeName if volumeName == "" { - klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimKey) + logger.V(5).Info("deleteClaim: volume not bound", "PVC", klog.KObj(claim)) return } // sync the volume when its claim is deleted. Explicitly sync'ing the // volume here in response to claim deletion prevents the volume from // waiting until the next sync period for its Release. 
- klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimKey, volumeName) + logger.V(5).Info("deleteClaim: scheduling sync of volume", "PVC", klog.KObj(claim), "volumeName", volumeName) ctrl.volumeQueue.Add(volumeName) } @@ -314,17 +319,17 @@ func (ctrl *PersistentVolumeController) Run(ctx context.Context) { ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")}) defer ctrl.eventBroadcaster.Shutdown() } - - klog.Infof("Starting persistent volume controller") - defer klog.Infof("Shutting down persistent volume controller") + logger := klog.FromContext(ctx) + logger.Info("Starting persistent volume controller") + defer logger.Info("Shutting down persistent volume controller") if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) { return } - ctrl.initializeCaches(ctrl.volumeLister, ctrl.claimLister) + ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister) - go wait.Until(ctrl.resync, ctrl.resyncPeriod, ctx.Done()) + go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done()) go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second) go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second) @@ -342,7 +347,8 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont // when no modifications are required this function could sometimes return a // copy of the volume and sometimes return a ref to the original claimClone := claim.DeepCopy() - modified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true) + logger := klog.FromContext(ctx) + modified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true) if !modified { return claimClone, nil } @@ -350,7 +356,7 @@ func (ctrl *PersistentVolumeController) 
updateClaimMigrationAnnotations(ctx cont if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) } - _, err = ctrl.storeClaimUpdate(newClaim) + _, err = ctrl.storeClaimUpdate(logger, newClaim) if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err) } @@ -360,8 +366,9 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinalizers(ctx context.Context, volume *v1.PersistentVolume) (*v1.PersistentVolume, error) { volumeClone := volume.DeepCopy() - annModified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false) - modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(ctrl.csiMigratedPluginManager, volumeClone) + logger := klog.FromContext(ctx) + annModified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false) + modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(logger, ctrl.csiMigratedPluginManager, volumeClone) if !annModified && !finalizersModified { return volumeClone, nil } @@ -372,7 +379,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinal if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err) } - _, err = ctrl.storeVolumeUpdate(newVol) + _, err = ctrl.storeVolumeUpdate(logger, newVol) if err != nil { return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err) } @@ -385,7 +392,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinal // `Recycle`, removing the finalizer is necessary to reflect the recalimPolicy updates on the PV. 
// The method also removes any external PV Deletion Protection finalizers added on the PV, this represents CSI migration // rollback/disable scenarios. -func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) { +func modifyDeletionFinalizers(logger klog.Logger, cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) { modified := false var outFinalizers []string if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) { @@ -416,18 +423,18 @@ func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.Persiste reclaimPolicy := volume.Spec.PersistentVolumeReclaimPolicy // Add back the in-tree PV deletion protection finalizer if does not already exists if reclaimPolicy == v1.PersistentVolumeReclaimDelete && !slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) { - klog.V(4).Infof("Adding in-tree pv deletion protection finalizer on %s", volume.Name) + logger.V(4).Info("Adding in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name) outFinalizers = append(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer) modified = true } else if (reclaimPolicy == v1.PersistentVolumeReclaimRetain || reclaimPolicy == v1.PersistentVolumeReclaimRecycle) && slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) { // Remove the in-tree PV deletion protection finalizer if the reclaim policy is 'Retain' or 'Recycle' - klog.V(4).Infof("Removing in-tree pv deletion protection finalizer on %s", volume.Name) + logger.V(4).Info("Removing in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name) outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) modified = true } // Remove the external PV deletion protection finalizer if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) { 
- klog.V(4).Infof("Removing external pv deletion protection finalizer on %s", volume.Name) + logger.V(4).Info("Removing external pv deletion protection finalizer on volume", "volumeName", volume.Name) outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) modified = true } @@ -440,7 +447,7 @@ func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.Persiste // driver name for that provisioner is "on" based on feature flags, it will also // remove the annotation is migration is "off" for that provisioner in rollback // scenarios. Returns true if the annotations map was modified and false otherwise. -func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool { +func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool { var csiDriverName string var err error @@ -473,7 +480,7 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam if cmpm.IsMigrationEnabledForPlugin(provisioner) { csiDriverName, err = translator.GetCSINameFromInTreeName(provisioner) if err != nil { - klog.Errorf("Could not update volume migration annotations. Migration enabled for plugin %s but could not find corresponding driver name: %v", provisioner, err) + logger.Error(err, "Could not update volume migration annotations. Migration enabled for plugin but could not find corresponding driver name", "plugin", provisioner) return false } if migratedToDriver != csiDriverName { @@ -493,6 +500,7 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam // volumeWorker processes items from volumeQueue. It must run only once, // syncVolume is not assured to be reentrant. 
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { + logger := klog.FromContext(ctx) workFunc := func(ctx context.Context) bool { keyObj, quit := ctrl.volumeQueue.Get() if quit { @@ -500,11 +508,11 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { } defer ctrl.volumeQueue.Done(keyObj) key := keyObj.(string) - klog.V(5).Infof("volumeWorker[%s]", key) + logger.V(5).Info("volumeWorker", "volumeKey", key) _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err) + logger.V(4).Info("Error getting name of volume to get volume from informer", "volumeKey", key, "err", err) return false } volume, err := ctrl.volumeLister.Get(name) @@ -515,7 +523,7 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { return false } if !errors.IsNotFound(err) { - klog.V(2).Infof("error getting volume %q from informer: %v", key, err) + logger.V(2).Info("Error getting volume from informer", "volumeKey", key, "err", err) return false } @@ -523,26 +531,26 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { // "delete" volumeObj, found, err := ctrl.volumes.store.GetByKey(key) if err != nil { - klog.V(2).Infof("error getting volume %q from cache: %v", key, err) + logger.V(2).Info("Error getting volume from cache", "volumeKey", key, "err", err) return false } if !found { // The controller has already processed the delete event and // deleted the volume from its cache - klog.V(2).Infof("deletion of volume %q was already processed", key) + logger.V(2).Info("Deletion of volume was already processed", "volumeKey", key) return false } volume, ok := volumeObj.(*v1.PersistentVolume) if !ok { - klog.Errorf("expected volume, got %+v", volumeObj) + logger.Error(nil, "Expected volume, got", "obj", volumeObj) return false } - ctrl.deleteVolume(volume) + ctrl.deleteVolume(ctx, volume) return false } for { 
if quit := workFunc(ctx); quit { - klog.Infof("volume worker queue shutting down") + logger.Info("Volume worker queue shutting down") return } } @@ -551,6 +559,7 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) { // claimWorker processes items from claimQueue. It must run only once, // syncClaim is not reentrant. func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) { + logger := klog.FromContext(ctx) workFunc := func() bool { keyObj, quit := ctrl.claimQueue.Get() if quit { @@ -558,11 +567,11 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) { } defer ctrl.claimQueue.Done(keyObj) key := keyObj.(string) - klog.V(5).Infof("claimWorker[%s]", key) + logger.V(5).Info("claimWorker", "claimKey", key) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err) + logger.V(4).Info("Error getting namespace & name of claim to get claim from informer", "claimKey", key, "err", err) return false } claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name) @@ -573,33 +582,33 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) { return false } if !errors.IsNotFound(err) { - klog.V(2).Infof("error getting claim %q from informer: %v", key, err) + logger.V(2).Info("Error getting claim from informer", "claimKey", key, "err", err) return false } // The claim is not in informer cache, the event must have been "delete" claimObj, found, err := ctrl.claims.GetByKey(key) if err != nil { - klog.V(2).Infof("error getting claim %q from cache: %v", key, err) + logger.V(2).Info("Error getting claim from cache", "claimKey", key, "err", err) return false } if !found { // The controller has already processed the delete event and // deleted the claim from its cache - klog.V(2).Infof("deletion of claim %q was already processed", key) + logger.V(2).Info("Deletion of claim was 
already processed", "claimKey", key) return false } claim, ok := claimObj.(*v1.PersistentVolumeClaim) if !ok { - klog.Errorf("expected claim, got %+v", claimObj) + logger.Error(nil, "Expected claim, got", "obj", claimObj) return false } - ctrl.deleteClaim(claim) + ctrl.deleteClaim(ctx, claim) return false } for { if quit := workFunc(); quit { - klog.Infof("claim worker queue shutting down") + logger.Info("Claim worker queue shutting down") return } } @@ -608,25 +617,26 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) { // resync supplements short resync period of shared informers - we don't want // all consumers of PV/PVC shared informer to have a short resync period, // therefore we do our own. -func (ctrl *PersistentVolumeController) resync() { - klog.V(4).Infof("resyncing PV controller") +func (ctrl *PersistentVolumeController) resync(ctx context.Context) { + logger := klog.FromContext(ctx) + logger.V(4).Info("Resyncing PV controller") pvcs, err := ctrl.claimLister.List(labels.NewSelector()) if err != nil { - klog.Warningf("cannot list claims: %s", err) + logger.Info("Cannot list claims", "err", err) return } for _, pvc := range pvcs { - ctrl.enqueueWork(ctrl.claimQueue, pvc) + ctrl.enqueueWork(ctx, ctrl.claimQueue, pvc) } pvs, err := ctrl.volumeLister.List(labels.NewSelector()) if err != nil { - klog.Warningf("cannot list persistent volumes: %s", err) + logger.Info("Cannot list persistent volumes", "err", err) return } for _, pv := range pvs { - ctrl.enqueueWork(ctrl.volumeQueue, pv) + ctrl.enqueueWork(ctx, ctrl.volumeQueue, pv) } } @@ -642,14 +652,15 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(ctx context.Context, // modify these, therefore create a copy. 
claimClone := claim.DeepCopy() // TODO: remove the beta storage provisioner anno after the deprecation period + logger := klog.FromContext(ctx) metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnBetaStorageProvisioner, provisionerName) metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnStorageProvisioner, provisionerName) - updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true) - newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{}) + updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true) + newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) if err != nil { return newClaim, err } - _, err = ctrl.storeClaimUpdate(newClaim) + _, err = ctrl.storeClaimUpdate(logger, newClaim) if err != nil { return newClaim, err } @@ -678,7 +689,7 @@ func getVolumeStatusForLogging(volume *v1.PersistentVolume) string { // callback (i.e. with events from etcd) or with an object modified by the // controller itself. Returns "true", if the cache was updated, false if the // object is an old version and should be ignored. 
-func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) { +func storeObjectUpdate(logger klog.Logger, store cache.Store, obj interface{}, className string) (bool, error) { objName, err := controller.KeyFunc(obj) if err != nil { return false, fmt.Errorf("couldn't get key for object %+v: %w", obj, err) @@ -692,10 +703,9 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo if err != nil { return false, err } - if !found { // This is a new object - klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion()) + logger.V(4).Info("storeObjectUpdate, adding obj", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion()) if err = store.Add(obj); err != nil { return false, fmt.Errorf("error adding %s %q to controller cache: %w", className, objName, err) } @@ -719,11 +729,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo // Throw away only older version, let the same version pass - we do want to // get periodic sync events. 
if oldObjResourceVersion > objResourceVersion { - klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion()) + logger.V(4).Info("storeObjectUpdate: ignoring obj", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion()) return false, nil } - klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion()) + logger.V(4).Info("storeObjectUpdate updating obj with version", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion()) if err = store.Update(obj); err != nil { return false, fmt.Errorf("error updating %s %q in controller cache: %w", className, objName, err) } diff --git a/pkg/controller/volume/persistentvolume/pv_controller_test.go b/pkg/controller/volume/persistentvolume/pv_controller_test.go index 5d3716ed40b..481d84579d9 100644 --- a/pkg/controller/volume/persistentvolume/pv_controller_test.go +++ b/pkg/controller/volume/persistentvolume/pv_controller_test.go @@ -38,6 +38,7 @@ import ( "k8s.io/component-helpers/storage/volume" csitrans "k8s.io/csi-translation-lib" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" "k8s.io/kubernetes/pkg/features" @@ -310,7 +311,7 @@ func TestControllerSync(t *testing.T) { }, }, } - + _, ctx := ktesting.NewTestContext(t) doit := func(test controllerTest) { // Initialize the controller client := &fake.Clientset{} @@ -324,7 +325,7 @@ func TestControllerSync(t *testing.T) { client.PrependWatchReactor("pods", core.DefaultWatchReactor(watch.NewFake(), nil)) informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) - ctrl, err := newTestController(client, informers, true) + ctrl, err := newTestController(ctx, client, informers, true) if err != nil { t.Fatalf("Test %q construct persistent 
volume failed: %v", test.name, err) } @@ -341,7 +342,7 @@ func TestControllerSync(t *testing.T) { } ctrl.classLister = storagelisters.NewStorageClassLister(indexer) - reactor := newVolumeReactor(client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors) + reactor := newVolumeReactor(ctx, client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors) for _, claim := range test.initialClaims { claim = claim.DeepCopy() reactor.AddClaim(claim) @@ -380,7 +381,7 @@ func TestControllerSync(t *testing.T) { } // Simulate a periodic resync, just in case some events arrived in a // wrong order. - ctrl.resync() + ctrl.resync(ctx) err = reactor.waitTest(test) if err != nil { @@ -388,7 +389,7 @@ func TestControllerSync(t *testing.T) { } cancel() - evaluateTestResults(ctrl, reactor.VolumeReactor, test, t) + evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t) } for _, test := range tests { @@ -402,7 +403,8 @@ func TestControllerSync(t *testing.T) { func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) { pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty) pv.ResourceVersion = version - ret, err := storeObjectUpdate(c, pv, "volume") + logger, _ := ktesting.NewTestContext(t) + ret, err := storeObjectUpdate(logger, c, pv, "volume") if err != nil { t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err) } @@ -461,7 +463,8 @@ func TestControllerCacheParsingError(t *testing.T) { pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty) pv.ResourceVersion = "xxx" - _, err := storeObjectUpdate(c, pv, "volume") + logger, _ := ktesting.NewTestContext(t) + _, err := storeObjectUpdate(logger, c, pv, "volume") if err == nil { t.Errorf("Expected parsing error, got nil instead") } @@ -572,19 +575,19 @@ func TestAnnealMigrationAnnotations(t *testing.T) { translator := csitrans.New() cmpm := 
csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate) - + logger, _ := ktesting.NewTestContext(t) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { if tc.volumeAnnotations != nil { ann := tc.volumeAnnotations - updateMigrationAnnotations(cmpm, translator, ann, false) + updateMigrationAnnotations(logger, cmpm, translator, ann, false) if !reflect.DeepEqual(tc.expVolumeAnnotations, ann) { t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations) } } if tc.claimAnnotations != nil { ann := tc.claimAnnotations - updateMigrationAnnotations(cmpm, translator, ann, true) + updateMigrationAnnotations(logger, cmpm, translator, ann, true) if !reflect.DeepEqual(tc.expClaimAnnotations, ann) { t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations) } @@ -732,13 +735,13 @@ func TestModifyDeletionFinalizers(t *testing.T) { translator := csitrans.New() cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate) - + logger, _ := ktesting.NewTestContext(t) for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { if tc.volumeAnnotations != nil { tc.initialVolume.SetAnnotations(tc.volumeAnnotations) } - modifiedFinalizers, modified := modifyDeletionFinalizers(cmpm, tc.initialVolume) + modifiedFinalizers, modified := modifyDeletionFinalizers(logger, cmpm, tc.initialVolume) if modified != tc.expModified { t.Errorf("got modified: %v, but expected: %v", modified, tc.expModified) } @@ -881,7 +884,8 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) { }, }, } + _, ctx := ktesting.NewTestContext(t) for _, test := range tests { - runSyncTests(t, test.tests, test.storageClasses, nil) + runSyncTests(t, ctx, test.tests, test.storageClasses, nil) } } diff --git a/pkg/controller/volume/persistentvolume/recycle_test.go b/pkg/controller/volume/persistentvolume/recycle_test.go index c1d0c86b24e..13d8824fa61 100644 --- a/pkg/controller/volume/persistentvolume/recycle_test.go +++ 
b/pkg/controller/volume/persistentvolume/recycle_test.go @@ -24,6 +24,7 @@ import ( storage "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/component-helpers/storage/volume" + "k8s.io/klog/v2/ktesting" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" ) @@ -32,6 +33,7 @@ import ( // 2. Call the syncVolume *once*. // 3. Compare resulting volumes with expected volumes. func TestRecycleSync(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) runningPod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: "runningPod", @@ -139,7 +141,7 @@ func TestRecycleSync(t *testing.T) { expectedClaims: noclaims, expectedEvents: noevents, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Delete the volume before recycle operation starts reactor.DeleteVolume("volume6-6") }), @@ -155,7 +157,7 @@ func TestRecycleSync(t *testing.T) { expectedClaims: noclaims, expectedEvents: noevents, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Mark the volume as Available before the recycler starts reactor.MarkVolumeAvailable("volume6-7") }), @@ -172,7 +174,7 @@ func TestRecycleSync(t *testing.T) { expectedClaims: noclaims, expectedEvents: noevents, errors: noerrors, - test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, 
testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { + test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) { // Mark the volume as Available before the recycler starts reactor.MarkVolumeAvailable("volume6-8") }), @@ -249,7 +251,7 @@ func TestRecycleSync(t *testing.T) { test: wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume), }, } - runSyncTests(t, tests, []*storage.StorageClass{}, pods) + runSyncTests(t, ctx, tests, []*storage.StorageClass{}, pods) } // Test multiple calls to syncClaim/syncVolume and periodic sync of all @@ -268,6 +270,7 @@ func TestRecycleSync(t *testing.T) { // // Some limit of calls in enforced to prevent endless loops. func TestRecycleMultiSync(t *testing.T) { + _, ctx := ktesting.NewTestContext(t) tests := []controllerTest{ { // recycle failure - recycle returns error. The controller should @@ -282,5 +285,5 @@ func TestRecycleMultiSync(t *testing.T) { }, } - runMultisyncTests(t, tests, []*storage.StorageClass{}, "") + runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "") } diff --git a/pkg/controller/volume/persistentvolume/testing/testing.go b/pkg/controller/volume/persistentvolume/testing/testing.go index 4f24b5c46a9..73711e3fed7 100644 --- a/pkg/controller/volume/persistentvolume/testing/testing.go +++ b/pkg/controller/volume/persistentvolume/testing/testing.go @@ -17,8 +17,10 @@ limitations under the License. package testing import ( + "context" "errors" "fmt" + "k8s.io/klog/v2" "reflect" "strconv" "sync" @@ -32,7 +34,6 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes/fake" core "k8s.io/client-go/testing" - "k8s.io/klog/v2" ) // ErrVersionConflict is the error returned when resource version of requested @@ -87,14 +88,14 @@ type ReactorError struct { // to evaluate test results. 
// All updated objects are also inserted into changedObjects queue and // optionally sent back to the controller via its watchers. -func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Object, err error) { +func (r *VolumeReactor) React(ctx context.Context, action core.Action) (handled bool, ret runtime.Object, err error) { r.lock.Lock() defer r.lock.Unlock() - - klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource()) + logger := klog.FromContext(ctx) + logger.V(4).Info("Reactor got operation", "resource", action.GetResource(), "verb", action.GetVerb()) // Inject error when requested - err = r.injectReactError(action) + err = r.injectReactError(ctx, action) if err != nil { return true, nil, err } @@ -124,7 +125,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj } r.changedObjects = append(r.changedObjects, volume) r.changedSinceLastSync++ - klog.V(4).Infof("created volume %s", volume.Name) + logger.V(4).Info("Created volume", "volumeName", volume.Name) return true, volume, nil case action.Matches("create", "persistentvolumeclaims"): @@ -144,7 +145,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj } r.changedObjects = append(r.changedObjects, claim) r.changedSinceLastSync++ - klog.V(4).Infof("created claim %s", claim.Name) + logger.V(4).Info("Created claim", "PVC", klog.KObj(claim)) return true, claim, nil case action.Matches("update", "persistentvolumes"): @@ -160,7 +161,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return true, obj, ErrVersionConflict } if reflect.DeepEqual(storedVolume, volume) { - klog.V(4).Infof("nothing updated volume %s", volume.Name) + logger.V(4).Info("Nothing updated volume", "volumeName", volume.Name) return true, volume, nil } // Don't modify the existing object @@ -177,7 +178,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj 
r.volumes[volume.Name] = volume r.changedObjects = append(r.changedObjects, volume) r.changedSinceLastSync++ - klog.V(4).Infof("saved updated volume %s", volume.Name) + logger.V(4).Info("Saved updated volume", "volumeName", volume.Name) return true, volume, nil case action.Matches("update", "persistentvolumeclaims"): @@ -193,7 +194,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj return true, obj, ErrVersionConflict } if reflect.DeepEqual(storedClaim, claim) { - klog.V(4).Infof("nothing updated claim %s", claim.Name) + logger.V(4).Info("Nothing updated claim", "PVC", klog.KObj(claim)) return true, claim, nil } // Don't modify the existing object @@ -210,32 +211,33 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj r.claims[claim.Name] = claim r.changedObjects = append(r.changedObjects, claim) r.changedSinceLastSync++ - klog.V(4).Infof("saved updated claim %s", claim.Name) + logger.V(4).Info("Saved updated claim", "PVC", klog.KObj(claim)) return true, claim, nil case action.Matches("get", "persistentvolumes"): name := action.(core.GetAction).GetName() volume, found := r.volumes[name] if found { - klog.V(4).Infof("GetVolume: found %s", volume.Name) + logger.V(4).Info("GetVolume: found volume", "volumeName", volume.Name) return true, volume.DeepCopy(), nil } - klog.V(4).Infof("GetVolume: volume %s not found", name) + logger.V(4).Info("GetVolume: volume not found", "volumeName", name) return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name) case action.Matches("get", "persistentvolumeclaims"): name := action.(core.GetAction).GetName() + nameSpace := action.(core.GetAction).GetNamespace() claim, found := r.claims[name] if found { - klog.V(4).Infof("GetClaim: found %s", claim.Name) + logger.V(4).Info("GetClaim: found claim", "PVC", klog.KObj(claim)) return true, claim.DeepCopy(), nil } - klog.V(4).Infof("GetClaim: claim %s not found", name) + logger.V(4).Info("GetClaim: claim 
not found", "PVC", klog.KRef(nameSpace, name)) return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name) case action.Matches("delete", "persistentvolumes"): name := action.(core.DeleteAction).GetName() - klog.V(4).Infof("deleted volume %s", name) + logger.V(4).Info("Deleted volume", "volumeName", name) obj, found := r.volumes[name] if found { delete(r.volumes, name) @@ -249,7 +251,8 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj case action.Matches("delete", "persistentvolumeclaims"): name := action.(core.DeleteAction).GetName() - klog.V(4).Infof("deleted claim %s", name) + nameSpace := action.(core.DeleteAction).GetNamespace() + logger.V(4).Info("Deleted claim", "PVC", klog.KRef(nameSpace, name)) obj, found := r.claims[name] if found { delete(r.claims, name) @@ -297,18 +300,18 @@ func (r *VolumeReactor) getWatches(gvr schema.GroupVersionResource, ns string) [ // injectReactError returns an error when the test requested given action to // fail. nil is returned otherwise. -func (r *VolumeReactor) injectReactError(action core.Action) error { +func (r *VolumeReactor) injectReactError(ctx context.Context, action core.Action) error { if len(r.errors) == 0 { // No more errors to inject, everything should succeed. return nil } - + logger := klog.FromContext(ctx) for i, expected := range r.errors { - klog.V(4).Infof("trying to match %q %q with %q %q", expected.Verb, expected.Resource, action.GetVerb(), action.GetResource()) + logger.V(4).Info("Trying to match resource verb", "resource", action.GetResource(), "verb", action.GetVerb(), "expectedResource", expected.Resource, "expectedVerb", expected.Verb) if action.Matches(expected.Verb, expected.Resource) { // That's the action we're waiting for, remove it from injectedErrors r.errors = append(r.errors[:i], r.errors[i+1:]...) 
- klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.Verb, expected.Resource, expected.Error) + logger.V(4).Info("Reactor found matching error", "index", i, "expectedResource", expected.Resource, "expectedVerb", expected.Verb, "err", expected.Error) return expected.Error } } @@ -382,7 +385,7 @@ func (r *VolumeReactor) CheckClaims(expectedClaims []*v1.PersistentVolumeClaim) // PopChange returns one recorded updated object, either *v1.PersistentVolume // or *v1.PersistentVolumeClaim. Returns nil when there are no changes. -func (r *VolumeReactor) PopChange() interface{} { +func (r *VolumeReactor) PopChange(ctx context.Context) interface{} { r.lock.Lock() defer r.lock.Unlock() @@ -391,14 +394,15 @@ func (r *VolumeReactor) PopChange() interface{} { } // For debugging purposes, print the queue + logger := klog.FromContext(ctx) for _, obj := range r.changedObjects { switch obj.(type) { case *v1.PersistentVolume: vol, _ := obj.(*v1.PersistentVolume) - klog.V(4).Infof("reactor queue: %s", vol.Name) + logger.V(4).Info("Reactor queue", "volumeName", vol.Name) case *v1.PersistentVolumeClaim: claim, _ := obj.(*v1.PersistentVolumeClaim) - klog.V(4).Infof("reactor queue: %s", claim.Name) + logger.V(4).Info("Reactor queue", "PVC", klog.KObj(claim)) } } @@ -539,7 +543,7 @@ func (r *VolumeReactor) MarkVolumeAvailable(name string) { } // NewVolumeReactor creates a volume reactor. 
-func NewVolumeReactor(client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []ReactorError) *VolumeReactor { +func NewVolumeReactor(ctx context.Context, client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []ReactorError) *VolumeReactor { reactor := &VolumeReactor{ volumes: make(map[string]*v1.PersistentVolume), claims: make(map[string]*v1.PersistentVolumeClaim), @@ -548,13 +552,30 @@ func NewVolumeReactor(client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *w errors: errors, watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher), } - client.AddReactor("create", "persistentvolumes", reactor.React) - client.AddReactor("create", "persistentvolumeclaims", reactor.React) - client.AddReactor("update", "persistentvolumes", reactor.React) - client.AddReactor("update", "persistentvolumeclaims", reactor.React) - client.AddReactor("get", "persistentvolumes", reactor.React) - client.AddReactor("get", "persistentvolumeclaims", reactor.React) - client.AddReactor("delete", "persistentvolumes", reactor.React) - client.AddReactor("delete", "persistentvolumeclaims", reactor.React) + client.AddReactor("create", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) + + client.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) + client.AddReactor("update", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) + client.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) + client.AddReactor("get", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, 
action) + }) + client.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) + client.AddReactor("delete", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) + client.AddReactor("delete", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return reactor.React(ctx, action) + }) return reactor } diff --git a/pkg/controller/volume/persistentvolume/volume_host.go b/pkg/controller/volume/persistentvolume/volume_host.go index b47f183a61d..49b6d3e3df4 100644 --- a/pkg/controller/volume/persistentvolume/volume_host.go +++ b/pkg/controller/volume/persistentvolume/volume_host.go @@ -123,7 +123,7 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) { return func(types.UID) { - klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController") + klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in PersistentVolumeController") } } diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go index 46175eb2477..ccf42db1875 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller.go @@ -55,7 +55,7 @@ type Controller struct { } // NewPVCProtectionController returns a new instance of PVCProtectionController. 
-func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) { +func NewPVCProtectionController(logger klog.Logger, pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) { e := &Controller{ client: cl, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"), @@ -64,9 +64,11 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI e.pvcLister = pvcInformer.Lister() e.pvcListerSynced = pvcInformer.Informer().HasSynced pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: e.pvcAddedUpdated, + AddFunc: func(obj interface{}) { + e.pvcAddedUpdated(logger, obj) + }, UpdateFunc: func(old, new interface{}) { - e.pvcAddedUpdated(new) + e.pvcAddedUpdated(logger, new) }, }) @@ -78,13 +80,13 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI } podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - e.podAddedDeletedUpdated(nil, obj, false) + e.podAddedDeletedUpdated(logger, nil, obj, false) }, DeleteFunc: func(obj interface{}) { - e.podAddedDeletedUpdated(nil, obj, true) + e.podAddedDeletedUpdated(logger, nil, obj, true) }, UpdateFunc: func(old, new interface{}) { - e.podAddedDeletedUpdated(old, new, false) + e.podAddedDeletedUpdated(logger, old, new, false) }, }) @@ -96,8 +98,9 @@ func (c *Controller) Run(ctx context.Context, workers int) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - klog.InfoS("Starting PVC protection controller") - defer klog.InfoS("Shutting down PVC protection controller") + logger := klog.FromContext(ctx) + logger.Info("Starting PVC protection controller") + defer logger.Info("Shutting down PVC protection controller") if !cache.WaitForNamedCacheSync("PVC 
protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) { return @@ -142,15 +145,16 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool { } func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName string) error { - klog.V(4).InfoS("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName)) + logger := klog.FromContext(ctx) + logger.V(4).Info("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName)) startTime := time.Now() defer func() { - klog.V(4).InfoS("Finished processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName), "duration", time.Since(startTime)) + logger.V(4).Info("Finished processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName), "duration", time.Since(startTime)) }() pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName) if apierrors.IsNotFound(err) { - klog.V(4).InfoS("PVC not found, ignoring", "PVC", klog.KRef(pvcNamespace, pvcName)) + logger.V(4).Info("PVC not found, ignoring", "PVC", klog.KRef(pvcNamespace, pvcName)) return nil } if err != nil { @@ -167,7 +171,7 @@ func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName strin if !isUsed { return c.removeFinalizer(ctx, pvc) } - klog.V(2).InfoS("Keeping PVC because it is being used", "PVC", klog.KObj(pvc)) + logger.V(2).Info("Keeping PVC because it is being used", "PVC", klog.KObj(pvc)) } if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) { @@ -184,11 +188,12 @@ func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeC claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) + logger := klog.FromContext(ctx) if err != nil { - klog.ErrorS(err, "Error adding protection finalizer to PVC", "PVC", klog.KObj(pvc)) + logger.Error(err, "Error adding protection finalizer to PVC", 
"PVC", klog.KObj(pvc)) return err } - klog.V(3).InfoS("Added protection finalizer to PVC", "PVC", klog.KObj(pvc)) + logger.V(3).Info("Added protection finalizer to PVC", "PVC", klog.KObj(pvc)) return nil } @@ -196,11 +201,12 @@ func (c *Controller) removeFinalizer(ctx context.Context, pvc *v1.PersistentVolu claimClone := pvc.DeepCopy() claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil) _, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{}) + logger := klog.FromContext(ctx) if err != nil { - klog.ErrorS(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc)) + logger.Error(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc)) return err } - klog.V(3).InfoS("Removed protection finalizer from PVC", "PVC", klog.KObj(pvc)) + logger.V(3).Info("Removed protection finalizer from PVC", "PVC", klog.KObj(pvc)) return nil } @@ -208,9 +214,10 @@ func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeCl // Look for a Pod using pvc in the Informer's cache. If one is found the // correct decision to keep pvc is taken without doing an expensive live // list. - if inUse, err := c.askInformer(pvc); err != nil { + logger := klog.FromContext(ctx) + if inUse, err := c.askInformer(logger, pvc); err != nil { // No need to return because a live list will follow. 
- klog.Error(err) + logger.Error(err, "") } else if inUse { return true, nil } @@ -222,8 +229,8 @@ func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeCl return c.askAPIServer(ctx, pvc) } -func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) { - klog.V(4).InfoS("Looking for Pods using PVC in the Informer's cache", "PVC", klog.KObj(pvc)) +func (c *Controller) askInformer(logger klog.Logger, pvc *v1.PersistentVolumeClaim) (bool, error) { + logger.V(4).Info("Looking for Pods using PVC in the Informer's cache", "PVC", klog.KObj(pvc)) // The indexer is used to find pods which might use the PVC. objs, err := c.podIndexer.ByIndex(common.PodPVCIndex, fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name)) @@ -239,17 +246,18 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) { // We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim, // but for volume.Ephemeral we need to be sure that this particular PVC is the one // created for the ephemeral volume. 
- if c.podUsesPVC(pod, pvc) { + if c.podUsesPVC(logger, pod, pvc) { return true, nil } } - klog.V(4).InfoS("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc)) + logger.V(4).Info("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc)) return false, nil } func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeClaim) (bool, error) { - klog.V(4).InfoS("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc)) + logger := klog.FromContext(ctx) + logger.V(4).Info("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc)) podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(ctx, metav1.ListOptions{}) if err != nil { @@ -257,16 +265,16 @@ func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeC } for _, pod := range podsList.Items { - if c.podUsesPVC(&pod, pvc) { + if c.podUsesPVC(logger, &pod, pvc) { return true, nil } } - klog.V(2).InfoS("PVC is unused", "PVC", klog.KObj(pvc)) + logger.V(2).Info("PVC is unused", "PVC", klog.KObj(pvc)) return false, nil } -func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool { +func (c *Controller) podUsesPVC(logger klog.Logger, pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool { // Check whether pvc is used by pod only if pod is scheduled, because // kubelet sees pods after they have been scheduled and it won't allow // starting a pod referencing a PVC with a non-nil deletionTimestamp. 
@@ -274,7 +282,7 @@ func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool for _, volume := range pod.Spec.Volumes { if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc.Name || !podIsShutDown(pod) && volume.Ephemeral != nil && ephemeral.VolumeClaimName(pod, &volume) == pvc.Name && ephemeral.VolumeIsForPod(pod, pvc) == nil { - klog.V(2).InfoS("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc)) + logger.V(2).Info("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc)) return true } } @@ -313,7 +321,7 @@ func podIsShutDown(pod *v1.Pod) bool { } // pvcAddedUpdated reacts to pvc added/updated events -func (c *Controller) pvcAddedUpdated(obj interface{}) { +func (c *Controller) pvcAddedUpdated(logger klog.Logger, obj interface{}) { pvc, ok := obj.(*v1.PersistentVolumeClaim) if !ok { utilruntime.HandleError(fmt.Errorf("PVC informer returned non-PVC object: %#v", obj)) @@ -324,7 +332,7 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) { utilruntime.HandleError(fmt.Errorf("couldn't get key for Persistent Volume Claim %#v: %v", pvc, err)) return } - klog.V(4).InfoS("Got event on PVC", "pvc", klog.KObj(pvc)) + logger.V(4).Info("Got event on PVC", "pvc", klog.KObj(pvc)) if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) || protectionutil.IsDeletionCandidate(pvc, volumeutil.PVCProtectionFinalizer) { c.queue.Add(key) @@ -332,9 +340,9 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) { } // podAddedDeletedUpdated reacts to Pod events -func (c *Controller) podAddedDeletedUpdated(old, new interface{}, deleted bool) { +func (c *Controller) podAddedDeletedUpdated(logger klog.Logger, old, new interface{}, deleted bool) { if pod := c.parsePod(new); pod != nil { - c.enqueuePVCs(pod, deleted) + c.enqueuePVCs(logger, pod, deleted) // An update notification might mask the deletion of a pod X and the // following creation of a pod Y with the same namespaced name as X. 
If @@ -342,7 +350,7 @@ func (c *Controller) podAddedDeletedUpdated(old, new interface{}, deleted bool) // where it is blocking deletion of a PVC not referenced by Y, otherwise // such PVC will never be deleted. if oldPod := c.parsePod(old); oldPod != nil && oldPod.UID != pod.UID { - c.enqueuePVCs(oldPod, true) + c.enqueuePVCs(logger, oldPod, true) } } } @@ -367,13 +375,13 @@ func (*Controller) parsePod(obj interface{}) *v1.Pod { return pod } -func (c *Controller) enqueuePVCs(pod *v1.Pod, deleted bool) { +func (c *Controller) enqueuePVCs(logger klog.Logger, pod *v1.Pod, deleted bool) { // Filter out pods that can't help us to remove a finalizer on PVC if !deleted && !volumeutil.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" { return } - klog.V(4).InfoS("Enqueuing PVCs for Pod", "pod", klog.KObj(pod), "podUID", pod.UID) + logger.V(4).Info("Enqueuing PVCs for Pod", "pod", klog.KObj(pod), "podUID", pod.UID) // Enqueue all PVCs that the pod uses for _, volume := range pod.Spec.Volumes { diff --git a/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go b/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go index a0b25061f73..2b0f144fa07 100644 --- a/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go +++ b/pkg/controller/volume/pvcprotection/pvc_protection_controller_test.go @@ -36,6 +36,7 @@ import ( "k8s.io/client-go/kubernetes/fake" clienttesting "k8s.io/client-go/testing" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -399,7 +400,8 @@ func TestPVCProtectionController(t *testing.T) { podInformer := informers.Core().V1().Pods() // Create the controller - ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client) + logger, _ := ktesting.NewTestContext(t) + ctrl, err := NewPVCProtectionController(logger, pvcInformer, podInformer, client) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -424,15 
+426,15 @@ func TestPVCProtectionController(t *testing.T) { // Start the test by simulating an event if test.updatedPVC != nil { - ctrl.pvcAddedUpdated(test.updatedPVC) + ctrl.pvcAddedUpdated(logger, test.updatedPVC) } switch { case test.deletedPod != nil && test.updatedPod != nil && test.deletedPod.Namespace == test.updatedPod.Namespace && test.deletedPod.Name == test.updatedPod.Name: - ctrl.podAddedDeletedUpdated(test.deletedPod, test.updatedPod, false) + ctrl.podAddedDeletedUpdated(logger, test.deletedPod, test.updatedPod, false) case test.updatedPod != nil: - ctrl.podAddedDeletedUpdated(nil, test.updatedPod, false) + ctrl.podAddedDeletedUpdated(logger, nil, test.updatedPod, false) case test.deletedPod != nil: - ctrl.podAddedDeletedUpdated(nil, test.deletedPod, true) + ctrl.podAddedDeletedUpdated(logger, nil, test.deletedPod, true) } // Process the controller queue until we get expected results diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller.go b/pkg/controller/volume/pvprotection/pv_protection_controller.go index 215d0204329..d45b87190d2 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller.go @@ -49,7 +49,7 @@ type Controller struct { } // NewPVProtectionController returns a new *Controller. 
-func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller { +func NewPVProtectionController(logger klog.Logger, pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller { e := &Controller{ client: cl, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"), @@ -58,9 +58,11 @@ func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer e.pvLister = pvInformer.Lister() e.pvListerSynced = pvInformer.Informer().HasSynced pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: e.pvAddedUpdated, + AddFunc: func(obj interface{}) { + e.pvAddedUpdated(logger, obj) + }, UpdateFunc: func(old, new interface{}) { - e.pvAddedUpdated(new) + e.pvAddedUpdated(logger, new) }, }) @@ -72,8 +74,9 @@ func (c *Controller) Run(ctx context.Context, workers int) { defer utilruntime.HandleCrash() defer c.queue.ShutDown() - klog.Infof("Starting PV protection controller") - defer klog.Infof("Shutting down PV protection controller") + logger := klog.FromContext(ctx) + logger.Info("Starting PV protection controller") + defer logger.Info("Shutting down PV protection controller") if !cache.WaitForNamedCacheSync("PV protection", ctx.Done(), c.pvListerSynced) { return @@ -114,15 +117,16 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool { } func (c *Controller) processPV(ctx context.Context, pvName string) error { - klog.V(4).Infof("Processing PV %s", pvName) + logger := klog.FromContext(ctx) + logger.V(4).Info("Processing PV", "PV", klog.KRef("", pvName)) startTime := time.Now() defer func() { - klog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime)) + logger.V(4).Info("Finished processing PV", "PV", klog.KRef("", pvName), "cost", time.Since(startTime)) }() pv, err := c.pvLister.Get(pvName) if apierrors.IsNotFound(err) { - klog.V(4).Infof("PV %s not found, ignoring", pvName) 
+ logger.V(4).Info("PV not found, ignoring", "PV", klog.KRef("", pvName)) return nil } if err != nil { @@ -136,7 +140,7 @@ func (c *Controller) processPV(ctx context.Context, pvName string) error { if !isUsed { return c.removeFinalizer(ctx, pv) } - klog.V(4).Infof("Keeping PV %s because it is being used", pvName) + logger.V(4).Info("Keeping PV because it is being used", "PV", klog.KRef("", pvName)) } if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) { @@ -153,11 +157,12 @@ func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume) pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer) _, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{}) + logger := klog.FromContext(ctx) if err != nil { - klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err) + logger.V(3).Info("Error adding protection finalizer to PV", "PV", klog.KObj(pv), "err", err) return err } - klog.V(3).Infof("Added protection finalizer to PV %s", pv.Name) + logger.V(3).Info("Added protection finalizer to PV", "PV", klog.KObj(pv)) return nil } @@ -165,11 +170,12 @@ func (c *Controller) removeFinalizer(ctx context.Context, pv *v1.PersistentVolum pvClone := pv.DeepCopy() pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil) _, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{}) + logger := klog.FromContext(ctx) if err != nil { - klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err) + logger.V(3).Info("Error removing protection finalizer from PV", "PV", klog.KObj(pv), "err", err) return err } - klog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name) + logger.V(3).Info("Removed protection finalizer from PV", "PV", klog.KObj(pv)) return nil } @@ -185,13 +191,13 @@ func (c *Controller) 
isBeingUsed(pv *v1.PersistentVolume) bool { } // pvAddedUpdated reacts to pv added/updated events -func (c *Controller) pvAddedUpdated(obj interface{}) { +func (c *Controller) pvAddedUpdated(logger klog.Logger, obj interface{}) { pv, ok := obj.(*v1.PersistentVolume) if !ok { utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj)) return } - klog.V(4).Infof("Got event on PV %s", pv.Name) + logger.V(4).Info("Got event on PV", "PV", klog.KObj(pv)) if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) || protectionutil.IsDeletionCandidate(pv, volumeutil.PVProtectionFinalizer) { c.queue.Add(pv.Name) diff --git a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go index b0645781820..d122b890158 100644 --- a/pkg/controller/volume/pvprotection/pv_protection_controller_test.go +++ b/pkg/controller/volume/pvprotection/pv_protection_controller_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/kubernetes/fake" clienttesting "k8s.io/client-go/testing" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -210,11 +211,12 @@ func TestPVProtectionController(t *testing.T) { } // Create the controller - ctrl := NewPVProtectionController(pvInformer, client) + logger, _ := ktesting.NewTestContext(t) + ctrl := NewPVProtectionController(logger, pvInformer, client) // Start the test by simulating an event if test.updatedPV != nil { - ctrl.pvAddedUpdated(test.updatedPV) + ctrl.pvAddedUpdated(logger, test.updatedPV) } // Process the controller queue until we get expected results diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index da30ead630f..90217a102d7 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -359,12 
+359,13 @@ type mountedPod struct { } func (asw *actualStateOfWorld) MarkVolumeAsAttached( + logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error { return asw.addVolume(volumeName, volumeSpec, devicePath) } func (asw *actualStateOfWorld) MarkVolumeAsUncertain( - volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error { + logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error { return nil } @@ -473,7 +474,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(markVolumeOpts operationexecu return asw.AddPodToVolume(markVolumeOpts) } -func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) { +func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) { // no operation for kubelet side } @@ -770,7 +771,7 @@ func (asw *actualStateOfWorld) SetDeviceMountState( return nil } -func (asw *actualStateOfWorld) InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) { +func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) { asw.Lock() defer asw.Unlock() diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index e69018c4d1c..2fe292e7c98 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" @@ -71,7 +72,8 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t 
*testing.T) { } // Act - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) // Assert if err != nil { @@ -115,7 +117,8 @@ func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing. volumeName := v1.UniqueVolumeName("this-would-never-be-a-volume-name") // Act - err := asw.MarkVolumeAsAttached(volumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err := asw.MarkVolumeAsAttached(logger, volumeName, volumeSpec, "" /* nodeName */, devicePath) // Assert if err != nil { @@ -159,14 +162,14 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) { if err != nil { t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: Actual: <%v>", err) } - - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } // Act - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) // Assert if err != nil { @@ -210,8 +213,8 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) { if err != nil { t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: Actual: <%v>", err) } - - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. 
Expected: Actual: <%v>", err) } @@ -286,8 +289,8 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { if err != nil { t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: Actual: <%v>", err) } - - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } @@ -394,8 +397,8 @@ func Test_AddTwoPodsToVolume_Positive(t *testing.T) { generatedVolumeName1, generatedVolumeName2, volumeSpec1, volumeSpec2) } - - err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } @@ -534,8 +537,8 @@ func TestActualStateOfWorld_FoundDuringReconstruction(t *testing.T) { generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec( plugin, volumeSpec1) require.NoError(t, err) - - err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. 
Expected: Actual: <%v>", err) } @@ -611,8 +614,9 @@ func Test_MarkVolumeAsDetached_Negative_PodInVolume(t *testing.T) { }, }, } + logger, _ := ktesting.NewTestContext(t) volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} - err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + err := asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } @@ -801,8 +805,8 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) { if err != nil { t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: Actual: <%v>", err) } - - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } @@ -854,8 +858,8 @@ func Test_AddPodToVolume_Positive_SELinux(t *testing.T) { if err != nil { t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: Actual: <%v>", err) } - - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } @@ -933,8 +937,8 @@ func Test_MarkDeviceAsMounted_Positive_SELinux(t *testing.T) { if err != nil { t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: Actual: <%v>", err) } - - err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. 
Expected: Actual: <%v>", err) } @@ -980,8 +984,8 @@ func TestUncertainVolumeMounts(t *testing.T) { generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec( plugin, volumeSpec1) require.NoError(t, err) - - err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) } diff --git a/pkg/kubelet/volumemanager/metrics/metrics_test.go b/pkg/kubelet/volumemanager/metrics/metrics_test.go index 151967bb242..5cf3cb8ea24 100644 --- a/pkg/kubelet/volumemanager/metrics/metrics_test.go +++ b/pkg/kubelet/volumemanager/metrics/metrics_test.go @@ -17,6 +17,7 @@ limitations under the License. package metrics import ( + "k8s.io/klog/v2/ktesting" "testing" v1 "k8s.io/api/core/v1" @@ -74,7 +75,8 @@ func TestMetricCollection(t *testing.T) { // Add one volume to ActualStateOfWorld devicePath := "fake/device/path" - err = asw.MarkVolumeAsAttached("", volumeSpec, "", devicePath) + logger, _ := ktesting.NewTestContext(t) + err = asw.MarkVolumeAsAttached(logger, "", volumeSpec, "", devicePath) if err != nil { t.Fatalf("MarkVolumeAsAttached failed. 
Expected: Actual: <%v>", err) } diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index b4b8ee0c083..80ef98aafb2 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -374,7 +374,7 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize( dswp.desiredStateOfWorld.UpdatePersistentVolumeSize(uniqueVolumeName, pvCap) // in case the actualStateOfWorld was rebuild after kubelet restart ensure that claimSize is set to accurate value - dswp.actualStateOfWorld.InitializeClaimSize(uniqueVolumeName, pvcStatusCap) + dswp.actualStateOfWorld.InitializeClaimSize(klog.TODO(), uniqueVolumeName, pvcStatusCap) } func getUniqueVolumeName( diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go index f32c087bb6d..3f96add355f 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package populator import ( + "k8s.io/klog/v2/ktesting" "testing" "time" @@ -134,7 +135,8 @@ func TestFindAndAddNewPods_WithRescontructedVolume(t *testing.T) { VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false), VolumeMountState: operationexecutor.VolumeMounted, } - dswp.actualStateOfWorld.MarkVolumeAsAttached(opts.VolumeName, opts.VolumeSpec, "fake-node", "") + logger, _ := ktesting.NewTestContext(t) + dswp.actualStateOfWorld.MarkVolumeAsAttached(logger, opts.VolumeName, opts.VolumeSpec, "fake-node", "") dswp.actualStateOfWorld.MarkVolumeAsMounted(opts) dswp.findAndAddNewPods() @@ -1393,8 +1395,9 @@ func volumeCapacity(size int) v1.ResourceList { } func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) { + logger, _ := ktesting.NewTestContext(t) for _, volumeToMount := range dsw.GetVolumesToMount() { - err := asw.MarkVolumeAsAttached(volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "") + err := asw.MarkVolumeAsAttached(logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "") if err != nil { t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err) } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_common.go b/pkg/kubelet/volumemanager/reconciler/reconciler_common.go index 13726c77202..2894dc5238d 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_common.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_common.go @@ -233,6 +233,7 @@ func (rc *reconciler) mountAttachedVolumes(volumeToMount cache.VolumeToMount, po } func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) { + logger := klog.TODO() if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable { //// lets not spin a goroutine and unnecessarily trigger exponential backoff if this happens if volumeToMount.PluginIsAttachable && !volumeToMount.ReportedInUse { @@ -243,6 +244,7 @@ func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) { // for controller to 
finish attaching volume. klog.V(5).InfoS(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""), "pod", klog.KObj(volumeToMount.Pod)) err := rc.operationExecutor.VerifyControllerAttachedVolume( + logger, volumeToMount.VolumeToMount, rc.nodeName, rc.actualStateOfWorld) @@ -261,7 +263,7 @@ func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) { NodeName: rc.nodeName, } klog.V(5).InfoS(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""), "pod", klog.KObj(volumeToMount.Pod)) - err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld) + err := rc.operationExecutor.AttachVolume(logger, volumeToAttach, rc.actualStateOfWorld) if err != nil && !isExpectedError(err) { klog.ErrorS(err, volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error(), "pod", klog.KObj(volumeToMount.Pod)) } @@ -297,7 +299,7 @@ func (rc *reconciler) unmountDetachDevices() { // Only detach if kubelet detach is enabled klog.V(5).InfoS(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", "")) err := rc.operationExecutor.DetachVolume( - attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld) + klog.TODO(), attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld) if err != nil && !isExpectedError(err) { klog.ErrorS(err, attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error()) } diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index e331f62e4c0..60bddffe757 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -40,6 +40,7 @@ 
import ( core "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" @@ -2425,7 +2426,7 @@ func TestSyncStates(t *testing.T) { rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths) rcInstance, _ := rc.(*reconciler) - + logger, _ := ktesting.NewTestContext(t) for _, tpodInfo := range tc.podInfos { pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName) volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} @@ -2435,7 +2436,7 @@ func TestSyncStates(t *testing.T) { if err != nil { t.Fatalf("error adding volume %s to dsow: %v", volumeSpec.Name(), err) } - rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "") + rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "") } rcInstance.syncStates(tmpKubeletPodDir) diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct.go b/pkg/kubelet/volumemanager/reconciler/reconstruct.go index f245f6c2088..71bc69e8f0b 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct.go @@ -140,7 +140,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*gl for _, gvl := range volumesNeedUpdate { err := rc.actualStateOfWorld.MarkVolumeAsAttached( //TODO: the devicePath might not be correct for some volume plugins: see issue #54108 - gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath) + klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath) if err != nil { klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName) continue diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct_new.go b/pkg/kubelet/volumemanager/reconciler/reconstruct_new.go index 
07d8b0f2887..51d52c0b1be 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct_new.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct_new.go @@ -107,7 +107,7 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa for _, gvl := range reconstructedVolumes { err := rc.actualStateOfWorld.MarkVolumeAsAttached( //TODO: the devicePath might not be correct for some volume plugins: see issue #54108 - gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath) + klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath) if err != nil { klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName) continue diff --git a/pkg/kubelet/volumemanager/reconciler/reconstruct_new_test.go b/pkg/kubelet/volumemanager/reconciler/reconstruct_new_test.go index 7f5f57f8e45..5c45acbc54a 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconstruct_new_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconstruct_new_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/volume" volumetesting "k8s.io/kubernetes/pkg/volume/testing" @@ -205,7 +206,7 @@ func TestCleanOrphanVolumes(t *testing.T) { rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths) rcInstance, _ := rc.(*reconciler) rcInstance.volumesFailedReconstruction = tc.volumesFailedReconstruction - + logger, _ := ktesting.NewTestContext(t) for _, tpodInfo := range tc.podInfos { pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName) volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} @@ -215,7 +216,7 @@ func TestCleanOrphanVolumes(t *testing.T) { if err != nil { t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err) } - 
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "") + rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "") } // Act @@ -324,7 +325,8 @@ func TestReconstructVolumesMount(t *testing.T) { if err != nil { t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err) } - rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "") + logger, _ := ktesting.NewTestContext(t) + rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "") rcInstance.populatorHasAddedPods = func() bool { // Mark DSW populated to allow unmounting of volumes. diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go index 16b6ee85733..366450492fa 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go @@ -43,6 +43,7 @@ import ( k8stesting "k8s.io/client-go/testing" "k8s.io/component-helpers/storage/volume" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller" pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing" ) @@ -150,7 +151,8 @@ type testEnv struct { func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv { client := &fake.Clientset{} - reactor := pvtesting.NewVolumeReactor(client, nil, nil, nil) + _, ctx := ktesting.NewTestContext(t) + reactor := pvtesting.NewVolumeReactor(ctx, client, nil, nil, nil) // TODO refactor all tests to use real watch mechanism, see #72327 client.AddWatchReactor("*", func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) { gvr := action.GetResource() diff --git a/pkg/volume/azure_file/azure_provision.go b/pkg/volume/azure_file/azure_provision.go index 799bc8576da..9635348b3da 100644 --- a/pkg/volume/azure_file/azure_provision.go +++ 
b/pkg/volume/azure_file/azure_provision.go @@ -24,7 +24,6 @@ import ( "strings" "github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage" - v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -62,7 +61,7 @@ type azureFileDeleter struct { azureProvider azureCloudProvider } -func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *azureFilePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { azure, resourceGroup, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) if err != nil { klog.V(4).Infof("failed to get azure provider") @@ -102,7 +101,7 @@ func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureU } } -func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *azureFilePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { azure, resourceGroup, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) if err != nil { klog.V(4).Infof("failed to get azure provider") diff --git a/pkg/volume/gcepd/gce_pd.go b/pkg/volume/gcepd/gce_pd.go index 0df9ef10f5a..7bbeade0ef0 100644 --- a/pkg/volume/gcepd/gce_pd.go +++ b/pkg/volume/gcepd/gce_pd.go @@ -228,7 +228,7 @@ func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podU }}, nil } -func (plugin *gcePersistentDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *gcePersistentDiskPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { return plugin.newDeleterInternal(spec, &GCEDiskUtil{}) } @@ -245,7 +245,7 @@ func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, man }}, nil } -func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *gcePersistentDiskPlugin) 
NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { return plugin.newProvisionerInternal(options, &GCEDiskUtil{}) } diff --git a/pkg/volume/hostpath/host_path.go b/pkg/volume/hostpath/host_path.go index 88c0da17c63..5a6e1027455 100644 --- a/pkg/volume/hostpath/host_path.go +++ b/pkg/volume/hostpath/host_path.go @@ -18,6 +18,7 @@ package hostpath import ( "fmt" + "k8s.io/klog/v2" "os" "regexp" @@ -172,11 +173,11 @@ func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRec return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder) } -func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *hostPathPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { return newDeleter(spec, plugin.host) } -func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *hostPathPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { if !plugin.config.ProvisioningEnabled { return nil, fmt.Errorf("provisioning in volume plugin %q is disabled", plugin.GetPluginName()) } diff --git a/pkg/volume/hostpath/host_path_test.go b/pkg/volume/hostpath/host_path_test.go index a77c642f9a8..ec218977900 100644 --- a/pkg/volume/hostpath/host_path_test.go +++ b/pkg/volume/hostpath/host_path_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/client-go/kubernetes/fake" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" "k8s.io/kubernetes/pkg/volume/util/hostutil" @@ -111,7 +112,8 @@ func TestDeleter(t *testing.T) { if err != nil { t.Fatal("Can't find the plugin by name") } - deleter, err := plug.NewDeleter(spec) + logger, _ := ktesting.NewTestContext(t) + deleter, err := 
plug.NewDeleter(logger, spec) if err != nil { t.Errorf("Failed to make a new Deleter: %v", err) } @@ -135,13 +137,13 @@ func TestDeleterTempDir(t *testing.T) { "not-tmp": {true, "/nottmp"}, "good-tmp": {false, "/tmp/scratch"}, } - + logger, _ := ktesting.NewTestContext(t) for name, test := range tests { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil)) spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}} plug, _ := plugMgr.FindDeletablePluginBySpec(spec) - deleter, _ := plug.NewDeleter(spec) + deleter, _ := plug.NewDeleter(logger, spec) err := deleter.Delete() if err == nil && test.expectedFailure { t.Errorf("Expected failure for test '%s' but got nil err", name) @@ -167,7 +169,8 @@ func TestProvisioner(t *testing.T) { PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}), PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete, } - creator, err := plug.NewProvisioner(options) + logger, _ := ktesting.NewTestContext(t) + creator, err := plug.NewProvisioner(logger, options) if err != nil { t.Fatalf("Failed to make a new Provisioner: %v", err) } diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go index c0ec12f0c0f..8f1e3da32cf 100644 --- a/pkg/volume/plugins.go +++ b/pkg/volume/plugins.go @@ -213,7 +213,7 @@ type DeletableVolumePlugin interface { // NewDeleter creates a new volume.Deleter which knows how to delete this // resource in accordance with the underlying storage provider after the // volume's release from a claim - NewDeleter(spec *Spec) (Deleter, error) + NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error) } // ProvisionableVolumePlugin is an extended interface of VolumePlugin and is @@ -223,7 +223,7 @@ type ProvisionableVolumePlugin 
interface { // NewProvisioner creates a new volume.Provisioner which knows how to // create PersistentVolumes in accordance with the plugin's underlying // storage provider - NewProvisioner(options VolumeOptions) (Provisioner, error) + NewProvisioner(logger klog.Logger, options VolumeOptions) (Provisioner, error) } // AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment diff --git a/pkg/volume/portworx/portworx.go b/pkg/volume/portworx/portworx.go index 6cba5032716..e0eaf94495d 100644 --- a/pkg/volume/portworx/portworx.go +++ b/pkg/volume/portworx/portworx.go @@ -160,7 +160,7 @@ func (plugin *portworxVolumePlugin) newUnmounterInternal(volName string, podUID }}, nil } -func (plugin *portworxVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *portworxVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { return plugin.newDeleterInternal(spec, plugin.util) } @@ -178,7 +178,7 @@ func (plugin *portworxVolumePlugin) newDeleterInternal(spec *volume.Spec, manage }}, nil } -func (plugin *portworxVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *portworxVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { return plugin.newProvisionerInternal(options, plugin.util) } diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index 5a9fe52a795..4c0f9627aa9 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -592,7 +592,7 @@ func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface, return "", fmt.Errorf("can't find source name from mounted path: %s", mountPath) } -func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *rbdPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == 
nil { return nil, fmt.Errorf("spec.PersistentVolume.Spec.RBD is nil") } @@ -615,7 +615,7 @@ func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret str }}, nil } -func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *rbdPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { return plugin.newProvisionerInternal(options, &rbdUtil{}) } diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index 38bfc08252a..71be0fd6d3b 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -18,6 +18,7 @@ package testing import ( "fmt" + "k8s.io/klog/v2" "os" "path/filepath" goruntime "runtime" @@ -441,11 +442,11 @@ func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventR return nil } -func (plugin *FakeVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *FakeVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { return &FakeDeleter{"/attributesTransferredFromSpec", volume.MetricsNil{}}, nil } -func (plugin *FakeVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *FakeVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { plugin.Lock() defer plugin.Unlock() plugin.LastProvisionerOptions = options diff --git a/pkg/volume/util/operationexecutor/fakegenerator.go b/pkg/volume/util/operationexecutor/fakegenerator.go index 88d980e711d..6cc7559e513 100644 --- a/pkg/volume/util/operationexecutor/fakegenerator.go +++ b/pkg/volume/util/operationexecutor/fakegenerator.go @@ -17,10 +17,11 @@ limitations under the License. 
package operationexecutor import ( - "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog/v2" "time" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/types" csitrans "k8s.io/csi-translation-lib" "k8s.io/kubernetes/pkg/volume" @@ -54,11 +55,11 @@ func (f *fakeOGCounter) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, return f.recordFuncCall("GenerateUnmountVolumeFunc"), nil } -func (f *fakeOGCounter) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { +func (f *fakeOGCounter) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { return f.recordFuncCall("GenerateAttachVolumeFunc") } -func (f *fakeOGCounter) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { +func (f *fakeOGCounter) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { return f.recordFuncCall("GenerateDetachVolumeFunc"), nil } @@ -70,7 +71,7 @@ func (f *fakeOGCounter) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, return f.recordFuncCall("GenerateUnmountDeviceFunc"), nil } -func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { +func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { return 
f.recordFuncCall("GenerateVerifyControllerAttachedVolumeFunc"), nil } diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index 6785f58aab4..b11183550b4 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -65,7 +65,7 @@ import ( type OperationExecutor interface { // AttachVolume attaches the volume to the node specified in volumeToAttach. // It then updates the actual state of the world to reflect that. - AttachVolume(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error + AttachVolume(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error // VerifyVolumesAreAttachedPerNode verifies the given list of volumes to see whether they are still attached to the node. // If any volume is not attached right now, it will update the actual state of the world to reflect that. @@ -83,7 +83,7 @@ type OperationExecutor interface { // that. If verifySafeToDetach is set, a call is made to the fetch the node // object and it is used to verify that the volume does not exist in Node's // Status.VolumesInUse list (operation fails with error if it is). - DetachVolume(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error + DetachVolume(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error // If a volume has 'Filesystem' volumeMode, MountVolume mounts the // volume to the pod specified in volumeToMount. @@ -139,7 +139,7 @@ type OperationExecutor interface { // If the volume is not found or there is an error (fetching the node // object, for example) then an error is returned which triggers exponential // back off on retries. 
- VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error + VerifyControllerAttachedVolume(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error // IsOperationPending returns true if an operation for the given volumeName // and one of podName or nodeName is pending, otherwise it returns false @@ -245,13 +245,13 @@ type ActualStateOfWorldAttacherUpdater interface { // TODO: in the future, we should be able to remove the volumeName // argument to this method -- since it is used only for attachable // volumes. See issue 29695. - MarkVolumeAsAttached(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error + MarkVolumeAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error // Marks the specified volume as *possibly* attached to the specified node. // If an attach operation fails, the attach/detach controller does not know for certain if the volume is attached or not. // If the volume name is supplied, that volume name will be used. If not, the // volume name is computed using the result from querying the plugin. 
- MarkVolumeAsUncertain(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error + MarkVolumeAsUncertain(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error // Marks the specified volume as detached from the specified node MarkVolumeAsDetached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) @@ -262,10 +262,10 @@ type ActualStateOfWorldAttacherUpdater interface { // Unmarks the desire to detach for the specified volume (add the volume back to // the node's volumesToReportAsAttached list) - AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) + AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) // InitializeClaimSize sets pvc claim size by reading pvc.Status.Capacity - InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) + InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity } @@ -789,10 +789,11 @@ func (oe *operationExecutor) IsOperationSafeToRetry( } func (oe *operationExecutor) AttachVolume( + logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { generatedOperations := - oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld) + oe.operationGenerator.GenerateAttachVolumeFunc(logger, volumeToAttach, actualStateOfWorld) if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) { return oe.pendingOperations.Run( @@ -804,11 +805,12 @@ func (oe *operationExecutor) AttachVolume( } func (oe *operationExecutor) DetachVolume( + logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { generatedOperations, err := - oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, 
actualStateOfWorld) + oe.operationGenerator.GenerateDetachVolumeFunc(logger, volumeToDetach, verifySafeToDetach, actualStateOfWorld) if err != nil { return err } @@ -1039,11 +1041,12 @@ func (oe *operationExecutor) ExpandInUseVolume(volumeToMount VolumeToMount, actu } func (oe *operationExecutor) VerifyControllerAttachedVolume( + logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error { generatedOperations, err := - oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld) + oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(logger, volumeToMount, nodeName, actualStateOfWorld) if err != nil { return err } diff --git a/pkg/volume/util/operationexecutor/operation_executor_test.go b/pkg/volume/util/operationexecutor/operation_executor_test.go index b219224946c..3a40f9a9922 100644 --- a/pkg/volume/util/operationexecutor/operation_executor_test.go +++ b/pkg/volume/util/operationexecutor/operation_executor_test.go @@ -18,16 +18,18 @@ package operationexecutor import ( "fmt" - "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/klog/v2" "strconv" "testing" "time" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/uuid" csitrans "k8s.io/csi-translation-lib" + "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util/hostutil" volumetypes "k8s.io/kubernetes/pkg/volume/util/types" @@ -59,7 +61,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachableAndNonDevi // Act for i := range volumesToMount { - podName := "pod-" + strconv.Itoa((i + 1)) + podName := "pod-" + strconv.Itoa(i+1) pod := getTestPodWithSecret(podName, secretName) volumesToMount[i] = VolumeToMount{ Pod: pod, @@ -87,7 +89,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins(t 
*te volumeName := v1.UniqueVolumeName(pdName) // Act for i := range volumesToMount { - podName := "pod-" + strconv.Itoa((i + 1)) + podName := "pod-" + strconv.Itoa(i+1) pod := getTestPodWithGCEPD(podName, pdName) volumesToMount[i] = VolumeToMount{ Pod: pod, @@ -114,7 +116,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForDeviceMountablePlugins( volumeName := v1.UniqueVolumeName(pdName) // Act for i := range volumesToMount { - podName := "pod-" + strconv.Itoa((i + 1)) + podName := "pod-" + strconv.Itoa(i+1) pod := getTestPodWithGCEPD(podName, pdName) volumesToMount[i] = VolumeToMount{ Pod: pod, @@ -209,7 +211,8 @@ func TestOperationExecutor_AttachSingleNodeVolumeConcurrentlyToSameNode(t *testi }, }, } - oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -239,7 +242,8 @@ func TestOperationExecutor_AttachMultiNodeVolumeConcurrentlyToSameNode(t *testin }, }, } - oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -269,7 +273,8 @@ func TestOperationExecutor_AttachSingleNodeVolumeConcurrentlyToDifferentNodes(t }, }, } - oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -297,7 +302,8 @@ func TestOperationExecutor_AttachMultiNodeVolumeConcurrentlyToDifferentNodes(t * }, }, } - oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -327,7 +333,8 @@ func 
TestOperationExecutor_DetachSingleNodeVolumeConcurrentlyFromSameNode(t *tes }, }, } - oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -357,7 +364,8 @@ func TestOperationExecutor_DetachMultiNodeVolumeConcurrentlyFromSameNode(t *test }, }, } - oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -385,7 +393,8 @@ func TestOperationExecutor_DetachMultiNodeVolumeConcurrentlyFromDifferentNodes(t }, }, } - oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */) } // Assert @@ -440,7 +449,8 @@ func TestOperationExecutor_VerifyControllerAttachedVolumeConcurrently(t *testing volumesToMount[i] = VolumeToMount{ VolumeName: v1.UniqueVolumeName(pdName), } - oe.VerifyControllerAttachedVolume(volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */) + logger, _ := ktesting.NewTestContext(t) + oe.VerifyControllerAttachedVolume(logger, volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */) } // Assert @@ -460,7 +470,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins_Vo // Act for i := range volumesToMount { - podName := "pod-" + strconv.Itoa((i + 1)) + podName := "pod-" + strconv.Itoa(i+1) pod := getTestPodWithSecret(podName, secretName) volumesToMount[i] = VolumeToMount{ Pod: pod, @@ -491,7 
+501,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins_Volum // Act for i := range volumesToMount { - podName := "pod-" + strconv.Itoa((i + 1)) + podName := "pod-" + strconv.Itoa(i+1) pod := getTestPodWithGCEPD(podName, pdName) volumesToMount[i] = VolumeToMount{ Pod: pod, @@ -603,7 +613,7 @@ func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount Mo OperationFunc: opFunc, }, nil } -func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { +func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { opFunc := func() volumetypes.OperationContext { startOperationAndBlock(fopg.ch, fopg.quit) return volumetypes.NewOperationContext(nil, nil, false) @@ -612,7 +622,7 @@ func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach Volu OperationFunc: opFunc, } } -func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { +func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { opFunc := func() volumetypes.OperationContext { startOperationAndBlock(fopg.ch, fopg.quit) return volumetypes.NewOperationContext(nil, nil, false) @@ -639,7 +649,7 @@ func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach Att OperationFunc: opFunc, }, nil } -func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld 
ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { +func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { opFunc := func() volumetypes.OperationContext { startOperationAndBlock(fopg.ch, fopg.quit) return volumetypes.NewOperationContext(nil, nil, false) diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index 269c8b51636..d47d422f888 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ b/pkg/volume/util/operationexecutor/operation_generator.go @@ -121,10 +121,10 @@ type OperationGenerator interface { GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error) // Generates the AttachVolume function needed to perform attach of a volume plugin - GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations + GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations // Generates the DetachVolume function needed to perform the detach of a volume plugin - GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) + GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) // Generates the VolumesAreAttached function needed to verify if volume plugins are attached GenerateVolumesAreAttachedFunc(attachedVolumes 
[]AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) @@ -133,7 +133,7 @@ type OperationGenerator interface { GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter hostutil.HostUtils) (volumetypes.GeneratedOperations, error) // Generates the function needed to check if the attach_detach controller has attached the volume plugin - GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) + GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) // Generates the MapVolume function needed to perform the map of a volume plugin GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error) @@ -348,6 +348,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc( } func (og *operationGenerator) GenerateAttachVolumeFunc( + logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations { @@ -378,6 +379,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( uncertainNode = derr.CurrentNode } addErr := actualStateOfWorld.MarkVolumeAsUncertain( + logger, volumeToAttach.VolumeName, volumeToAttach.VolumeSpec, uncertainNode) @@ -399,7 +401,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc( // Update actual state of world addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) + logger, v1.UniqueVolumeName(""), 
volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) @@ -447,6 +449,7 @@ func (og *operationGenerator) GetCSITranslator() InTreeToCSITranslator { } func (og *operationGenerator) GenerateDetachVolumeFunc( + logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { @@ -505,7 +508,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc( if err != nil { // On failure, add volume back to ReportAsAttached list actualStateOfWorld.AddVolumeToReportAsAttached( - volumeToDetach.VolumeName, volumeToDetach.NodeName) + logger, volumeToDetach.VolumeName, volumeToDetach.NodeName) eventErr, detailedErr := volumeToDetach.GenerateError("DetachVolume.Detach failed", err) return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } @@ -1501,6 +1504,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc( } func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( + logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) { @@ -1548,13 +1552,13 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( // updated accordingly. addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */) + logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. 
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr) return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } - actualStateOfWorld.InitializeClaimSize(volumeToMount.VolumeName, claimSize) + actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize) return volumetypes.NewOperationContext(nil, nil, migrated) } @@ -1588,14 +1592,14 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc( for _, attachedVolume := range node.Status.VolumesAttached { if attachedVolume.Name == volumeToMount.VolumeName { addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached( - v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath) + logger, v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath) klog.InfoS(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath)), "pod", klog.KObj(volumeToMount.Pod)) if addVolumeNodeErr != nil { // On failure, return error. Caller will log and retry. 
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr) return volumetypes.NewOperationContext(eventErr, detailedErr, migrated) } - actualStateOfWorld.InitializeClaimSize(volumeToMount.VolumeName, claimSize) + actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize) return volumetypes.NewOperationContext(nil, nil, migrated) } } diff --git a/pkg/volume/vsphere_volume/vsphere_volume.go b/pkg/volume/vsphere_volume/vsphere_volume.go index 6f7d02974c5..0660eed66bb 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume.go +++ b/pkg/volume/vsphere_volume/vsphere_volume.go @@ -324,7 +324,7 @@ type vsphereVolumeDeleter struct { var _ volume.Deleter = &vsphereVolumeDeleter{} -func (plugin *vsphereVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { +func (plugin *vsphereVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) { return plugin.newDeleterInternal(spec, &VsphereDiskUtil{}) } @@ -353,7 +353,7 @@ type vsphereVolumeProvisioner struct { var _ volume.Provisioner = &vsphereVolumeProvisioner{} -func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { +func (plugin *vsphereVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) { return plugin.newProvisionerInternal(options, &VsphereDiskUtil{}) } diff --git a/test/integration/volume/attach_detach_test.go b/test/integration/volume/attach_detach_test.go index bb6360762f4..56d3bc711db 100644 --- a/test/integration/volume/attach_detach_test.go +++ b/test/integration/volume/attach_detach_test.go @@ -31,6 +31,7 @@ import ( restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" fakecloud "k8s.io/cloud-provider/fake" + "k8s.io/klog/v2/ktesting" kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing" 
"k8s.io/kubernetes/pkg/controller/volume/attachdetach" volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" @@ -161,7 +162,6 @@ func TestPodDeletionWithDswp(t *testing.T) { defer framework.DeleteNamespaceOrDie(testClient, ns, t) pod := fakePodWithVol(namespaceName) - podStopCh := make(chan struct{}) if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to created node : %v", err) @@ -183,7 +183,7 @@ func TestPodDeletionWithDswp(t *testing.T) { go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) initCSIObjects(ctx.Done(), informers) - go ctrl.Run(ctx.Done()) + go ctrl.Run(ctx) // Run pvCtrl to avoid leaking goroutines started during its creation. go pvCtrl.Run(ctx) @@ -201,7 +201,6 @@ func TestPodDeletionWithDswp(t *testing.T) { waitForPodsInDSWP(t, ctrl.GetDesiredStateOfWorld()) // let's stop pod events from getting triggered - close(podStopCh) err = podInformer.GetStore().Delete(podInformerObj) if err != nil { t.Fatalf("Error deleting pod : %v", err) @@ -262,7 +261,7 @@ func TestPodUpdateWithWithADC(t *testing.T) { go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) initCSIObjects(ctx.Done(), informers) - go ctrl.Run(ctx.Done()) + go ctrl.Run(ctx) // Run pvCtrl to avoid leaking goroutines started during its creation. go pvCtrl.Run(ctx) @@ -335,7 +334,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) { go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) initCSIObjects(ctx.Done(), informers) - go ctrl.Run(ctx.Done()) + go ctrl.Run(ctx) // Run pvCtrl to avoid leaking goroutines started during its creation. 
go pvCtrl.Run(ctx) @@ -426,7 +425,9 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync plugins := []volume.VolumePlugin{plugin} cloud := &fakecloud.Cloud{} informers := clientgoinformers.NewSharedInformerFactory(testClient, resyncPeriod) + logger, ctx := ktesting.NewTestContext(t) ctrl, err := attachdetach.NewAttachDetachController( + logger, testClient, informers.Core().V1().Pods(), informers.Core().V1().Nodes(), @@ -463,7 +464,7 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync NodeInformer: informers.Core().V1().Nodes(), EnableDynamicProvisioning: false, } - pvCtrl, err := persistentvolume.NewController(params) + pvCtrl, err := persistentvolume.NewController(ctx, params) if err != nil { t.Fatalf("Failed to create PV controller: %v", err) } @@ -509,14 +510,15 @@ func TestPodAddedByDswp(t *testing.T) { go podInformer.Run(podStopCh) // start controller loop - ctx, cancel := context.WithCancel(context.Background()) + _, ctx := ktesting.NewTestContext(t) + ctx, cancel := context.WithCancel(ctx) defer cancel() go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done()) go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done()) go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done()) initCSIObjects(ctx.Done(), informers) - go ctrl.Run(ctx.Done()) + go ctrl.Run(ctx) // Run pvCtrl to avoid leaking goroutines started during its creation. 
go pvCtrl.Run(ctx) @@ -605,7 +607,7 @@ func TestPVCBoundWithADC(t *testing.T) { informers.Start(ctx.Done()) informers.WaitForCacheSync(ctx.Done()) initCSIObjects(ctx.Done(), informers) - go ctrl.Run(ctx.Done()) + go ctrl.Run(ctx) go pvCtrl.Run(ctx) waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4) diff --git a/test/integration/volume/persistent_volumes_test.go b/test/integration/volume/persistent_volumes_test.go index e5d5c2075e4..fcc24e4ffd5 100644 --- a/test/integration/volume/persistent_volumes_test.go +++ b/test/integration/volume/persistent_volumes_test.go @@ -47,6 +47,7 @@ import ( "k8s.io/kubernetes/test/integration/framework" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" ) // Several tests in this file are configurable by environment variables: @@ -1357,7 +1358,9 @@ func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.T plugins := []volume.VolumePlugin{plugin} cloud := &fakecloud.Cloud{} informers := informers.NewSharedInformerFactory(testClient, getSyncPeriod(syncPeriod)) + _, ctx := ktesting.NewTestContext(t) ctrl, err := persistentvolumecontroller.NewController( + ctx, persistentvolumecontroller.ControllerParameters{ KubeClient: binderClient, SyncPeriod: getSyncPeriod(syncPeriod), diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go index 0f379495155..efb2f512081 100644 --- a/test/integration/volumescheduling/volume_binding_test.go +++ b/test/integration/volumescheduling/volume_binding_test.go @@ -28,6 +28,7 @@ import ( "time" "k8s.io/klog/v2" + "k8s.io/klog/v2/ktesting" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -1128,8 +1129,8 @@ func initPVController(t *testing.T, testCtx *testutil.TestContext, provisionDela NodeInformer: informerFactory.Core().V1().Nodes(), EnableDynamicProvisioning: true, } - - ctrl, err := persistentvolume.NewController(params) + _, ctx := ktesting.NewTestContext(t) + ctrl, err := 
persistentvolume.NewController(ctx, params) if err != nil { return nil, nil, err }