volume: use contextual logging

杨军10092085 2022-11-03 17:19:04 +08:00
parent b740a34302
commit 361e4ff0fa
61 changed files with 1326 additions and 1151 deletions

View File

@ -271,7 +271,8 @@ func startPersistentVolumeBinderController(ctx context.Context, controllerContex
EnableDynamicProvisioning: controllerContext.ComponentConfig.PersistentVolumeBinderController.VolumeConfiguration.EnableDynamicProvisioning,
FilteredDialOptions: filteredDialOptions,
}
volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params)
ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-binder-controller"))
volumeController, volumeControllerErr := persistentvolumecontroller.NewController(ctx, params)
if volumeControllerErr != nil {
return nil, true, fmt.Errorf("failed to construct persistentvolume controller: %v", volumeControllerErr)
}
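This hunk shows the pattern the commit applies to every volume controller: derive a named logger from the incoming context, store it back with klog.NewContext, and hand the context to the constructor so every downstream log line carries the controller name. A minimal sketch of that pattern, assuming only klog/v2; newController is a hypothetical stand-in for persistentvolumecontroller.NewController:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// newController stands in for persistentvolumecontroller.NewController; it
// recovers the named logger from the context instead of using klog globals.
func newController(ctx context.Context) error {
	logger := klog.FromContext(ctx)
	logger.Info("Constructing controller") // logged with the name set below
	return nil
}

func main() {
	ctx := context.Background()
	// Attach a component name once; every FromContext call downstream sees it.
	ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-binder-controller"))
	if err := newController(ctx); err != nil {
		klog.FromContext(ctx).Error(err, "Failed to construct controller")
	}
}
```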
@ -295,8 +296,11 @@ func startAttachDetachController(ctx context.Context, controllerContext Controll
return nil, true, err
}
logger := klog.LoggerWithName(klog.FromContext(ctx), "attachdetach-controller")
ctx = klog.NewContext(ctx, logger)
attachDetachController, attachDetachControllerErr :=
attachdetach.NewAttachDetachController(
logger,
controllerContext.ClientBuilder.ClientOrDie("attachdetach-controller"),
controllerContext.InformerFactory.Core().V1().Pods(),
controllerContext.InformerFactory.Core().V1().Nodes(),
@ -316,7 +320,7 @@ func startAttachDetachController(ctx context.Context, controllerContext Controll
if attachDetachControllerErr != nil {
return nil, true, fmt.Errorf("failed to start attach/detach controller: %v", attachDetachControllerErr)
}
go attachDetachController.Run(ctx.Done())
go attachDetachController.Run(ctx)
return nil, true, nil
}
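Replacing Run(stopCh <-chan struct{}) with Run(ctx context.Context) works because ctx.Done() yields the same <-chan struct{} that the stop channel used to provide, while the context additionally carries the logger. A hedged sketch of the converted shape, with a placeholder controller type:

```go
package main

import (
	"context"
	"time"

	"k8s.io/klog/v2"
)

type controller struct{}

// Run blocks until the context is cancelled; ctx.Done() plays the role the
// old stopCh parameter used to play.
func (c *controller) Run(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting controller")
	defer logger.Info("Shutting down controller")
	<-ctx.Done() // replaces <-stopCh
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	(&controller{}).Run(ctx)
}
```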
@ -346,12 +350,14 @@ func startVolumeExpandController(ctx context.Context, controllerContext Controll
if expandControllerErr != nil {
return nil, true, fmt.Errorf("failed to start volume expand controller: %v", expandControllerErr)
}
ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-expander-controller"))
go expandController.Run(ctx)
return nil, true, nil
}
func startEphemeralVolumeController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "ephemeral-volume-controller"))
ephemeralController, err := ephemeral.NewController(
controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
controllerContext.InformerFactory.Core().V1().Pods(),
@ -548,7 +554,9 @@ func startGarbageCollectorController(ctx context.Context, controllerContext Cont
}
func startPVCProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolumeclaim-protection-controller"))
pvcProtectionController, err := pvcprotection.NewPVCProtectionController(
klog.FromContext(ctx),
controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims(),
controllerContext.InformerFactory.Core().V1().Pods(),
controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
@ -561,7 +569,9 @@ func startPVCProtectionController(ctx context.Context, controllerContext Control
}
func startPVProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
ctx = klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), "persistentvolume-protection-controller"))
go pvprotection.NewPVProtectionController(
klog.FromContext(ctx),
controllerContext.InformerFactory.Core().V1().PersistentVolumes(),
controllerContext.ClientBuilder.ClientOrDie("pv-protection-controller"),
).Run(ctx, 1)

View File

@ -384,7 +384,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
}
func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) error {
logger := klog.LoggerWithValues(klog.FromContext(ctx), "claim", klog.KRef(namespace, name))
logger := klog.LoggerWithValues(klog.FromContext(ctx), "PVC", klog.KRef(namespace, name))
ctx = klog.NewContext(ctx, logger)
claim, err := ec.claimLister.ResourceClaims(namespace).Get(name)
if err != nil {
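This rename also shows the per-object variant of the pattern: klog.LoggerWithValues pins a key/value pair (here the PVC reference) to the logger once, and every subsequent log call made through the derived context repeats it. A small self-contained sketch, assuming klog/v2:

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
)

// syncClaim sketches the pattern: the PVC reference is attached once and is
// then emitted automatically on every log line made through this context.
func syncClaim(ctx context.Context, namespace, name string) {
	logger := klog.LoggerWithValues(klog.FromContext(ctx), "PVC", klog.KRef(namespace, name))
	ctx = klog.NewContext(ctx, logger)
	klog.FromContext(ctx).V(5).Info("Syncing claim") // emits ... PVC="default/my-claim"
}

func main() {
	syncClaim(context.Background(), "default", "my-claim")
}
```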

View File

@ -19,6 +19,7 @@ limitations under the License.
package attachdetach
import (
"context"
"fmt"
"net"
"time"
@ -99,12 +100,13 @@ var DefaultTimerConfig = TimerConfig{
// AttachDetachController defines the operations supported by this controller.
type AttachDetachController interface {
Run(stopCh <-chan struct{})
Run(ctx context.Context)
GetDesiredStateOfWorld() cache.DesiredStateOfWorld
}
// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
logger klog.Logger,
kubeClient clientset.Interface,
podInformer coreinformers.PodInformer,
nodeInformer coreinformers.NodeInformer,
@ -194,9 +196,15 @@ func NewAttachDetachController(
adc.intreeToCSITranslator)
podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.podAdd,
UpdateFunc: adc.podUpdate,
DeleteFunc: adc.podDelete,
AddFunc: func(obj interface{}) {
adc.podAdd(logger, obj)
},
UpdateFunc: func(oldObj, newObj interface{}) {
adc.podUpdate(logger, oldObj, newObj)
},
DeleteFunc: func(obj interface{}) {
adc.podDelete(logger, obj)
},
})
// This custom indexer will index pods by their PVC keys. Then we don't need
@ -206,9 +214,15 @@ func NewAttachDetachController(
}
nodeInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
AddFunc: adc.nodeAdd,
UpdateFunc: adc.nodeUpdate,
DeleteFunc: adc.nodeDelete,
AddFunc: func(obj interface{}) {
adc.nodeAdd(logger, obj)
},
UpdateFunc: func(oldObj, newObj interface{}) {
adc.nodeUpdate(logger, oldObj, newObj)
},
DeleteFunc: func(obj interface{}) {
adc.nodeDelete(logger, obj)
},
})
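The client-go handler signatures are fixed (they accept only interface{} arguments), so the logger cannot be threaded through as a parameter; instead it is captured in closures, as above. A reduced sketch of that wrapping; handlePod is a hypothetical stand-in for the podAdd/podUpdate/podDelete methods:

```go
package main

import (
	"fmt"

	kcache "k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// handlePod stands in for the controller methods that now take the logger
// as their first argument.
func handlePod(logger klog.Logger, obj interface{}) {
	logger.V(4).Info("Pod event", "objType", fmt.Sprintf("%T", obj))
}

func main() {
	logger := klog.Background()
	// The fixed client-go callback signatures capture the logger via closures.
	handlers := kcache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { handlePod(logger, obj) },
		UpdateFunc: func(oldObj, newObj interface{}) { handlePod(logger, newObj) },
		DeleteFunc: func(obj interface{}) { handlePod(logger, obj) },
	}
	handlers.AddFunc("fake-pod") // invoke the field directly for illustration
}
```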
pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
@ -316,7 +330,7 @@ type attachDetachController struct {
filteredDialOptions *proxyutil.FilteredDialOptions
}
func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
func (adc *attachDetachController) Run(ctx context.Context) {
defer runtime.HandleCrash()
defer adc.pvcQueue.ShutDown()
@ -325,8 +339,9 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
adc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: adc.kubeClient.CoreV1().Events("")})
defer adc.broadcaster.Shutdown()
klog.Infof("Starting attach detach controller")
defer klog.Infof("Shutting down attach detach controller")
logger := klog.FromContext(ctx)
logger.Info("Starting attach detach controller")
defer logger.Info("Shutting down attach detach controller")
synced := []kcache.InformerSynced{adc.podsSynced, adc.nodesSynced, adc.pvcsSynced, adc.pvsSynced}
if adc.csiNodeSynced != nil {
@ -339,21 +354,21 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
synced = append(synced, adc.volumeAttachmentSynced)
}
if !kcache.WaitForNamedCacheSync("attach detach", stopCh, synced...) {
if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(), synced...) {
return
}
err := adc.populateActualStateOfWorld()
err := adc.populateActualStateOfWorld(logger)
if err != nil {
klog.Errorf("Error populating the actual state of world: %v", err)
logger.Error(err, "Error populating the actual state of world")
}
err = adc.populateDesiredStateOfWorld()
err = adc.populateDesiredStateOfWorld(logger)
if err != nil {
klog.Errorf("Error populating the desired state of world: %v", err)
logger.Error(err, "Error populating the desired state of world")
}
go adc.reconciler.Run(stopCh)
go adc.desiredStateOfWorldPopulator.Run(stopCh)
go wait.Until(adc.pvcWorker, time.Second, stopCh)
go adc.reconciler.Run(ctx)
go adc.desiredStateOfWorldPopulator.Run(ctx)
go wait.UntilWithContext(ctx, adc.pvcWorker, time.Second)
metrics.Register(adc.pvcLister,
adc.pvLister,
adc.podLister,
@ -363,11 +378,11 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
adc.csiMigratedPluginManager,
adc.intreeToCSITranslator)
<-stopCh
<-ctx.Done()
}
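The worker loop moves from wait.Until(f, period, stopCh) to wait.UntilWithContext(ctx, f, period), whose callback receives the context and can therefore recover the logger. A runnable sketch under those assumptions:

```go
package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// worker mirrors pvcWorker: it pulls the logger back out of the context.
func worker(ctx context.Context) {
	klog.FromContext(ctx).V(5).Info("Worker tick")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	// Runs worker every second until the context is cancelled.
	wait.UntilWithContext(ctx, worker, time.Second)
}
```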
func (adc *attachDetachController) populateActualStateOfWorld() error {
klog.V(5).Infof("Populating ActualStateOfWorld")
func (adc *attachDetachController) populateActualStateOfWorld(logger klog.Logger) error {
logger.V(5).Info("Populating ActualStateOfWorld")
nodes, err := adc.nodeLister.List(labels.Everything())
if err != nil {
return err
@ -382,18 +397,18 @@ func (adc *attachDetachController) populateActualStateOfWorld() error {
// volume spec is not needed to detach a volume. If the volume is used by a pod, its
// spec can be set: this happens in populateDesiredStateOfWorld, which
// scans the pods and updates their volumes in the ActualStateOfWorld too.
err = adc.actualStateOfWorld.MarkVolumeAsAttached(uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath)
err = adc.actualStateOfWorld.MarkVolumeAsAttached(logger, uniqueName, nil /* VolumeSpec */, nodeName, attachedVolume.DevicePath)
if err != nil {
klog.Errorf("Failed to mark the volume as attached: %v", err)
logger.Error(err, "Failed to mark the volume as attached")
continue
}
adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse)
adc.addNodeToDswp(node, types.NodeName(node.Name))
}
}
err = adc.processVolumeAttachments()
err = adc.processVolumeAttachments(logger)
if err != nil {
klog.Errorf("Failed to process volume attachments: %v", err)
logger.Error(err, "Failed to process volume attachments")
}
return err
}
@ -420,8 +435,8 @@ func (adc *attachDetachController) getNodeVolumeDevicePath(
return devicePath, err
}
func (adc *attachDetachController) populateDesiredStateOfWorld() error {
klog.V(5).Infof("Populating DesiredStateOfWorld")
func (adc *attachDetachController) populateDesiredStateOfWorld(logger klog.Logger) error {
logger.V(5).Info("Populating DesiredStateOfWorld")
pods, err := adc.podLister.List(labels.Everything())
if err != nil {
@ -429,56 +444,52 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
}
for _, pod := range pods {
podToAdd := pod
adc.podAdd(podToAdd)
adc.podAdd(logger, podToAdd)
for _, podVolume := range podToAdd.Spec.Volumes {
nodeName := types.NodeName(podToAdd.Spec.NodeName)
// The volume specs present in the ActualStateOfWorld are nil, let's replace those
// with the correct ones found on pods. The ones present in the ASW with no
// corresponding pod will be detached, and their spec is irrelevant.
volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
volumeSpec, err := util.CreateVolumeSpec(logger, podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
if err != nil {
klog.Errorf(
"Error creating spec for volume %q, pod %q/%q: %v",
podVolume.Name,
podToAdd.Namespace,
podToAdd.Name,
err)
logger.Error(
err,
"Error creating spec for volume of pod",
"pod", klog.KObj(podToAdd),
"volumeName", podVolume.Name)
continue
}
plugin, err := adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
klog.V(10).Infof(
"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
podVolume.Name,
podToAdd.Namespace,
podToAdd.Name,
err)
logger.V(10).Info(
"Skipping volume for pod: it does not implement attacher interface",
"pod", klog.KObj(podToAdd),
"volumeName", podVolume.Name,
"err", err)
continue
}
volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
klog.Errorf(
"Failed to find unique name for volume %q, pod %q/%q: %v",
podVolume.Name,
podToAdd.Namespace,
podToAdd.Name,
err)
logger.Error(
err,
"Failed to find unique name for volume of pod",
"pod", klog.KObj(podToAdd),
"volumeName", podVolume.Name)
continue
}
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
if attachState == cache.AttachStateAttached {
klog.V(10).Infof("Volume %q is attached to node %q. Marking as attached in ActualStateOfWorld",
volumeName,
nodeName,
)
logger.V(10).Info("Volume is attached to node. Marking as attached in ActualStateOfWorld",
"node", klog.KRef("", string(nodeName)),
"volumeName", volumeName)
devicePath, err := adc.getNodeVolumeDevicePath(volumeName, nodeName)
if err != nil {
klog.Errorf("Failed to find device path: %v", err)
logger.Error(err, "Failed to find device path")
continue
}
err = adc.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath)
err = adc.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, devicePath)
if err != nil {
klog.Errorf("Failed to update volume spec for node %s: %v", nodeName, err)
logger.Error(err, "Failed to update volume spec for node", "node", klog.KRef("", string(nodeName)))
}
}
}
@ -487,7 +498,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
return nil
}
func (adc *attachDetachController) podAdd(obj interface{}) {
func (adc *attachDetachController) podAdd(logger klog.Logger, obj interface{}) {
pod, ok := obj.(*v1.Pod)
if pod == nil || !ok {
return
@ -502,7 +513,7 @@ func (adc *attachDetachController) podAdd(obj interface{}) {
adc.desiredStateOfWorld,
true /* default volume action */)
util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */
adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
}
@ -511,7 +522,7 @@ func (adc *attachDetachController) GetDesiredStateOfWorld() cache.DesiredStateOf
return adc.desiredStateOfWorld
}
func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
func (adc *attachDetachController) podUpdate(logger klog.Logger, oldObj, newObj interface{}) {
pod, ok := newObj.(*v1.Pod)
if pod == nil || !ok {
return
@ -526,21 +537,21 @@ func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
adc.desiredStateOfWorld,
true /* default volume action */)
util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */
adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
}
func (adc *attachDetachController) podDelete(obj interface{}) {
func (adc *attachDetachController) podDelete(logger klog.Logger, obj interface{}) {
pod, ok := obj.(*v1.Pod)
if pod == nil || !ok {
return
}
util.ProcessPodVolumes(pod, false, /* addVolumes */
util.ProcessPodVolumes(logger, pod, false, /* addVolumes */
adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
}
func (adc *attachDetachController) nodeAdd(obj interface{}) {
func (adc *attachDetachController) nodeAdd(logger klog.Logger, obj interface{}) {
node, ok := obj.(*v1.Node)
// TODO: investigate whether we can return early when nodeName is empty
// kubernetes/kubernetes/issues/37777
@ -548,15 +559,15 @@ func (adc *attachDetachController) nodeAdd(obj interface{}) {
return
}
nodeName := types.NodeName(node.Name)
adc.nodeUpdate(nil, obj)
adc.nodeUpdate(logger, nil, obj)
// kubernetes/kubernetes/issues/37586
// This works around the case where a node add wipes out
// the attached volumes field. This function ensures that we sync with
// the actual status.
adc.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
adc.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName)
}
func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
func (adc *attachDetachController) nodeUpdate(logger klog.Logger, oldObj, newObj interface{}) {
node, ok := newObj.(*v1.Node)
// TODO: investigate whether we can return early when nodeName is empty
if node == nil || !ok {
@ -565,10 +576,10 @@ func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
nodeName := types.NodeName(node.Name)
adc.addNodeToDswp(node, nodeName)
adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse)
}
func (adc *attachDetachController) nodeDelete(obj interface{}) {
func (adc *attachDetachController) nodeDelete(logger klog.Logger, obj interface{}) {
node, ok := obj.(*v1.Node)
if node == nil || !ok {
return
@ -577,10 +588,10 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
nodeName := types.NodeName(node.Name)
if err := adc.desiredStateOfWorld.DeleteNode(nodeName); err != nil {
// This might happen during drain, but we still want it to appear in our logs
klog.Infof("error removing node %q from desired-state-of-world: %v", nodeName, err)
logger.Info("Error removing node from desired-state-of-world", "node", klog.KObj(node), "err", err)
}
adc.processVolumesInUse(nodeName, node.Status.VolumesInUse)
adc.processVolumesInUse(logger, nodeName, node.Status.VolumesInUse)
}
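The conversions above follow the key/value conventions used throughout this diff: klog.KObj when the full API object is available, klog.KRef("", name) when only a name string is, logger.Error for genuine failures, and logger.Info with an "err" key where the old code used klog.Warningf. A sketch of those forms, assuming the usual k8s.io imports:

```go
package main

import (
	"errors"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background()
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	err := errors.New("node is being drained")

	// Expected condition, old klog.Warningf: Info with an "err" key.
	logger.Info("Error removing node from desired-state-of-world", "node", klog.KObj(node), "err", err)
	// Only a name string available: KRef with an empty namespace.
	logger.V(4).Info("processVolumesInUse for node", "node", klog.KRef("", node.Name))
	// A genuine failure: the error goes in the dedicated first argument.
	logger.Error(err, "Failed to list VolumeAttachment objects")
}
```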
func (adc *attachDetachController) enqueuePVC(obj interface{}) {
@ -593,19 +604,19 @@ func (adc *attachDetachController) enqueuePVC(obj interface{}) {
}
// pvcWorker processes items from pvcQueue
func (adc *attachDetachController) pvcWorker() {
for adc.processNextItem() {
func (adc *attachDetachController) pvcWorker(ctx context.Context) {
for adc.processNextItem(klog.FromContext(ctx)) {
}
}
func (adc *attachDetachController) processNextItem() bool {
func (adc *attachDetachController) processNextItem(logger klog.Logger) bool {
keyObj, shutdown := adc.pvcQueue.Get()
if shutdown {
return false
}
defer adc.pvcQueue.Done(keyObj)
if err := adc.syncPVCByKey(keyObj.(string)); err != nil {
if err := adc.syncPVCByKey(logger, keyObj.(string)); err != nil {
// Rather than wait for a full resync, re-add the key to the
// queue to be processed.
adc.pvcQueue.AddRateLimited(keyObj)
@ -619,16 +630,16 @@ func (adc *attachDetachController) processNextItem() bool {
return true
}
func (adc *attachDetachController) syncPVCByKey(key string) error {
klog.V(5).Infof("syncPVCByKey[%s]", key)
func (adc *attachDetachController) syncPVCByKey(logger klog.Logger, key string) error {
logger.V(5).Info("syncPVCByKey", "pvcKey", key)
namespace, name, err := kcache.SplitMetaNamespaceKey(key)
if err != nil {
klog.V(4).Infof("error getting namespace & name of pvc %q to get pvc from informer: %v", key, err)
logger.V(4).Info("Error getting namespace & name of pvc to get pvc from informer", "pvcKey", key, "err", err)
return nil
}
pvc, err := adc.pvcLister.PersistentVolumeClaims(namespace).Get(name)
if apierrors.IsNotFound(err) {
klog.V(4).Infof("error getting pvc %q from informer: %v", key, err)
logger.V(4).Info("Error getting pvc from informer", "pvcKey", key, "err", err)
return nil
}
if err != nil {
@ -658,7 +669,7 @@ func (adc *attachDetachController) syncPVCByKey(key string) error {
adc.desiredStateOfWorld,
true /* default volume action */)
util.ProcessPodVolumes(pod, volumeActionFlag, /* addVolumes */
util.ProcessPodVolumes(logger, pod, volumeActionFlag, /* addVolumes */
adc.desiredStateOfWorld, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
}
return nil
@ -669,8 +680,8 @@ func (adc *attachDetachController) syncPVCByKey(key string) error {
// corresponding volume in the actual state of the world to indicate that it is
// mounted.
func (adc *attachDetachController) processVolumesInUse(
nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
klog.V(4).Infof("processVolumesInUse for node %q", nodeName)
logger klog.Logger, nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
logger.V(4).Info("processVolumesInUse for node", "node", klog.KRef("", string(nodeName)))
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
mounted := false
for _, volumeInUse := range volumesInUse {
@ -679,11 +690,14 @@ func (adc *attachDetachController) processVolumesInUse(
break
}
}
err := adc.actualStateOfWorld.SetVolumeMountedByNode(attachedVolume.VolumeName, nodeName, mounted)
err := adc.actualStateOfWorld.SetVolumeMountedByNode(logger, attachedVolume.VolumeName, nodeName, mounted)
if err != nil {
klog.Warningf(
"SetVolumeMountedByNode(%q, %q, %v) returned an error: %v",
attachedVolume.VolumeName, nodeName, mounted, err)
logger.Info(
"SetVolumeMountedByNode returned an error",
"node", klog.KRef("", string(nodeName)),
"volumeName", attachedVolume.VolumeName,
"mounted", mounted,
"err", err)
}
}
}
@ -696,10 +710,10 @@ func (adc *attachDetachController) processVolumesInUse(
//
// if yes, the reconciler will attempt to attach the volume;
// if not (it could be a dangling attachment), the reconciler will detach this volume.
func (adc *attachDetachController) processVolumeAttachments() error {
func (adc *attachDetachController) processVolumeAttachments(logger klog.Logger) error {
vas, err := adc.volumeAttachmentLister.List(labels.Everything())
if err != nil {
klog.Errorf("failed to list VolumeAttachment objects: %v", err)
logger.Error(err, "Failed to list VolumeAttachment objects")
return err
}
for _, va := range vas {
@ -707,13 +721,12 @@ func (adc *attachDetachController) processVolumeAttachments() error {
pvName := va.Spec.Source.PersistentVolumeName
if pvName == nil {
// Currently VA objects are created for CSI volumes only. nil pvName is unexpected, generate a warning
klog.Warningf("Skipping the va as its pvName is nil, va.Name: %q, nodeName: %q",
va.Name, nodeName)
logger.Info("Skipping the va as its pvName is nil", "node", klog.KRef("", string(nodeName)), "vaName", va.Name)
continue
}
pv, err := adc.pvLister.Get(*pvName)
if err != nil {
klog.Errorf("Unable to lookup pv object for: %q, err: %v", *pvName, err)
logger.Error(err, "Unable to lookup pv object", "PV", klog.KRef("", *pvName))
continue
}
@ -730,13 +743,7 @@ func (adc *attachDetachController) processVolumeAttachments() error {
// podNamespace is not needed here for Azurefile as the volumeName generated will be the same with or without podNamespace
volumeSpec, err = csimigration.TranslateInTreeSpecToCSI(volumeSpec, "" /* podNamespace */, adc.intreeToCSITranslator)
if err != nil {
klog.Errorf(
"Failed to translate intree volumeSpec to CSI volumeSpec for volume:%q, va.Name:%q, nodeName:%q: %s. Error: %v",
*pvName,
va.Name,
nodeName,
inTreePluginName,
err)
logger.Error(err, "Failed to translate intree volumeSpec to CSI volumeSpec for volume", "node", klog.KRef("", string(nodeName)), "inTreePluginName", inTreePluginName, "vaName", va.Name, "PV", klog.KRef("", *pvName))
continue
}
}
@ -746,32 +753,22 @@ func (adc *attachDetachController) processVolumeAttachments() error {
plugin, err = adc.volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
// Currently VA objects are created for CSI volumes only. nil plugin is unexpected, generate a warning
klog.Warningf(
"Skipping processing the volume %q on nodeName: %q, no attacher interface found. err=%v",
*pvName,
nodeName,
err)
logger.Info("Skipping processing the volume on node, no attacher interface found", "node", klog.KRef("", string(nodeName)), "PV", klog.KRef("", *pvName), "err", err)
continue
}
}
volumeName, err := volumeutil.GetUniqueVolumeNameFromSpec(plugin, volumeSpec)
if err != nil {
klog.Errorf(
"Failed to find unique name for volume:%q, va.Name:%q, nodeName:%q: %v",
*pvName,
va.Name,
nodeName,
err)
logger.Error(err, "Failed to find unique name for volume", "node", klog.KRef("", string(nodeName)), "vaName", va.Name, "PV", klog.KRef("", *pvName))
continue
}
attachState := adc.actualStateOfWorld.GetAttachState(volumeName, nodeName)
if attachState == cache.AttachStateDetached {
klog.V(1).Infof("Marking volume attachment as uncertain as volume:%q (%q) is not attached (%v)",
volumeName, nodeName, attachState)
err = adc.actualStateOfWorld.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName)
logger.V(1).Info("Marking volume attachment as uncertain as volume is not attached", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName, "attachState", attachState)
err = adc.actualStateOfWorld.MarkVolumeAsUncertain(logger, volumeName, volumeSpec, nodeName)
if err != nil {
klog.Errorf("MarkVolumeAsUncertain fail to add the volume %q (%q) to ASW. err: %s", volumeName, nodeName, err)
logger.Error(err, "MarkVolumeAsUncertain fail to add the volume to ASW", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName)
}
}
}
@ -887,7 +884,7 @@ func (adc *attachDetachController) GetServiceAccountTokenFunc() func(_, _ string
func (adc *attachDetachController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in attachDetachController")
klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in attachDetachController")
}
}
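DeleteServiceAccountTokenFunc receives neither a context nor a logger, so it falls back to the global structured call klog.ErrorS rather than the legacy klog.Errorf. A one-line sketch of that fallback:

```go
package main

import "k8s.io/klog/v2"

func main() {
	// When no logger can be threaded through, the global structured call is
	// the stopgap; the nil first argument means "no underlying error object".
	klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in attachDetachController")
}
```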

View File

@ -28,6 +28,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
kcache "k8s.io/client-go/tools/cache"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@ -47,7 +48,9 @@ func Test_NewAttachDetachController_Positive(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
// Act
logger, _ := ktesting.NewTestContext(t)
_, err := NewAttachDetachController(
logger,
fakeKubeClient,
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
@ -107,12 +110,13 @@ func Test_AttachDetachControllerStateOfWolrdPopulators_Positive(t *testing.T) {
adc.actualStateOfWorld = cache.NewActualStateOfWorld(&adc.volumePluginMgr)
adc.desiredStateOfWorld = cache.NewDesiredStateOfWorld(&adc.volumePluginMgr)
err := adc.populateActualStateOfWorld()
logger, _ := ktesting.NewTestContext(t)
err := adc.populateActualStateOfWorld(logger)
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
}
err = adc.populateDesiredStateOfWorld()
err = adc.populateDesiredStateOfWorld(logger)
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
}
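The tests obtain a logger and context from k8s.io/klog/v2/ktesting, which routes log output through t.Log so it is attributed to the test that produced it. A minimal sketch:

```go
package example

import (
	"testing"

	"k8s.io/klog/v2/ktesting"
)

func TestContextualLogging(t *testing.T) {
	// logger writes through t.Log; ctx carries the same logger for code
	// that takes a context rather than a logger.
	logger, ctx := ktesting.NewTestContext(t)
	logger.Info("Populating state of world")
	_ = ctx // pass to Run(ctx), populateActualStateOfWorld(logger), etc.
}
```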
@ -172,7 +176,11 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
var podsNum, extraPodsNum, nodesNum, i int
// Create the controller
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
adcObj, err := NewAttachDetachController(
logger,
fakeKubeClient,
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
@ -196,8 +204,6 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
adc := adcObj.(*attachDetachController)
stopCh := make(chan struct{})
pods, err := fakeKubeClient.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
@ -227,9 +233,9 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
csiNodeInformer.GetIndexer().Add(&csiNodeToAdd)
}
informerFactory.Start(stopCh)
informerFactory.Start(ctx.Done())
if !kcache.WaitForNamedCacheSync("attach detach", stopCh,
if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(),
informerFactory.Core().V1().Pods().Informer().HasSynced,
informerFactory.Core().V1().Nodes().Informer().HasSynced,
informerFactory.Storage().V1().CSINodes().Informer().HasSynced) {
@ -278,7 +284,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
}
// Populate ASW
err = adc.populateActualStateOfWorld()
err = adc.populateActualStateOfWorld(logger)
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
}
@ -295,7 +301,7 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
}
// Populate DSW
err = adc.populateDesiredStateOfWorld()
err = adc.populateDesiredStateOfWorld(logger)
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
}
@ -310,9 +316,8 @@ func attachDetachRecoveryTestCase(t *testing.T, extraPods1 []*v1.Pod, extraPods2
podInformer.GetIndexer().Add(newPod)
}
go adc.reconciler.Run(stopCh)
go adc.desiredStateOfWorldPopulator.Run(stopCh)
defer close(stopCh)
go adc.reconciler.Run(ctx)
go adc.desiredStateOfWorldPopulator.Run(ctx)
time.Sleep(time.Second * 1) // Wait so the reconciler calls sync at least once
@ -437,7 +442,11 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
vaInformer := informerFactory.Storage().V1().VolumeAttachments().Informer()
// Create the controller
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
adcObj, err := NewAttachDetachController(
logger,
fakeKubeClient,
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().Nodes(),
@ -537,10 +546,9 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
}
// Make sure the informer cache is synced
stopCh := make(chan struct{})
informerFactory.Start(stopCh)
informerFactory.Start(ctx.Done())
if !kcache.WaitForNamedCacheSync("attach detach", stopCh,
if !kcache.WaitForNamedCacheSync("attach detach", ctx.Done(),
informerFactory.Core().V1().Pods().Informer().HasSynced,
informerFactory.Core().V1().Nodes().Informer().HasSynced,
informerFactory.Core().V1().PersistentVolumes().Informer().HasSynced,
@ -549,21 +557,19 @@ func volumeAttachmentRecoveryTestCase(t *testing.T, tc vaTest) {
}
// Populate ASW
err = adc.populateActualStateOfWorld()
err = adc.populateActualStateOfWorld(logger)
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: <%v>", err)
}
// Populate DSW
err = adc.populateDesiredStateOfWorld()
err = adc.populateDesiredStateOfWorld(logger)
if err != nil {
t.Fatalf("Run failed with error. Expected: <no error> Actual: %v", err)
}
// Run reconciler and DSW populator loops
go adc.reconciler.Run(stopCh)
go adc.desiredStateOfWorldPopulator.Run(stopCh)
defer close(stopCh)
go adc.reconciler.Run(ctx)
go adc.desiredStateOfWorldPopulator.Run(ctx)
if tc.csiMigration {
verifyExpectedVolumeState(t, adc, tc)
} else {

View File

@ -23,13 +23,12 @@ package cache
import (
"fmt"
"k8s.io/klog/v2"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
@ -60,7 +59,7 @@ type ActualStateOfWorld interface {
// added.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, the node is added.
AddVolumeNode(uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, attached bool) (v1.UniqueVolumeName, error)
AddVolumeNode(logger klog.Logger, uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, attached bool) (v1.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true the mounted parameter indicates the volume
@ -72,23 +71,23 @@ type ActualStateOfWorld interface {
// returned.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
SetVolumeMountedByNode(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status
// object needs to be updated by the node updater again.
// If the specified node does not exist in the nodesToUpdateStatusFor list,
// log the error and return
SetNodeStatusUpdateNeeded(nodeName types.NodeName)
SetNodeStatusUpdateNeeded(logger klog.Logger, nodeName types.NodeName)
// ResetDetachRequestTime resets the detachRequestedTime to zero, which indicates there
// is no longer a detach request for the volume
ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
ResetDetachRequestTime(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// SetDetachRequestTime sets the detachRequestedTime to the current time if there is
// no previous request (the previous detachRequestedTime is zero) and returns the time
// elapsed since the last request
SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
SetDetachRequestTime(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
// DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the
@ -135,12 +134,12 @@ type ActualStateOfWorld interface {
// this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume
GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume
// GetVolumesToReportAttachedForNode returns the list of volumes that should be reported as
// attached for the given node. It reports a boolean indicating if there is an update for that
// node and the corresponding attachedVolumes list.
GetVolumesToReportAttachedForNode(name types.NodeName) (bool, []v1.AttachedVolume)
GetVolumesToReportAttachedForNode(logger klog.Logger, name types.NodeName) (bool, []v1.AttachedVolume)
// GetNodesToUpdateStatusFor returns the map of nodeNames to nodeToUpdateStatusFor
GetNodesToUpdateStatusFor() map[types.NodeName]nodeToUpdateStatusFor
@ -279,15 +278,17 @@ type nodeToUpdateStatusFor struct {
}
func (asw *actualStateOfWorld) MarkVolumeAsUncertain(
logger klog.Logger,
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error {
_, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, "", false /* isAttached */)
_, err := asw.AddVolumeNode(logger, uniqueName, volumeSpec, nodeName, "", false /* isAttached */)
return err
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
logger klog.Logger,
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(uniqueName, volumeSpec, nodeName, devicePath, true)
_, err := asw.AddVolumeNode(logger, uniqueName, volumeSpec, nodeName, devicePath, true)
return err
}
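Because ActualStateOfWorld is an interface, the logger has to be added to the method signatures themselves; there is no context to tunnel it through, so callers pass it explicitly. A reduced sketch of that signature change, using hypothetical names:

```go
package main

import (
	"k8s.io/klog/v2"
)

// stateOfWorld is a hypothetical, reduced stand-in for ActualStateOfWorld:
// each mutating method now takes the caller's logger as its first parameter.
type stateOfWorld interface {
	MarkVolumeAsAttached(logger klog.Logger, volumeName string) error
}

type fakeASW struct{}

func (f *fakeASW) MarkVolumeAsAttached(logger klog.Logger, volumeName string) error {
	logger.V(2).Info("Marking volume as attached", "volumeName", volumeName)
	return nil
}

func main() {
	var asw stateOfWorld = &fakeASW{}
	_ = asw.MarkVolumeAsAttached(klog.Background(), "volume-1")
}
```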
@ -304,13 +305,15 @@ func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
}
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName)
asw.addVolumeToReportAsAttached(logger, volumeName, nodeName)
}
func (asw *actualStateOfWorld) AddVolumeNode(
logger klog.Logger,
uniqueName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string, isAttached bool) (v1.UniqueVolumeName, error) {
volumeName := uniqueName
if volumeName == "" {
@ -354,10 +357,10 @@ func (asw *actualStateOfWorld) AddVolumeNode(
// Update the fields for volume object except the nodes attached to the volumes.
volumeObj.devicePath = devicePath
volumeObj.spec = volumeSpec
klog.V(2).Infof("Volume %q is already added to attachedVolume list to node %q, update device path %q",
volumeName,
nodeName,
devicePath)
logger.V(2).Info("Volume is already added to attachedVolume list to node, update device path",
"volumeName", volumeName,
"node", klog.KRef("", string(nodeName)),
"devicePath", devicePath)
}
node, nodeExists := volumeObj.nodesAttachedTo[nodeName]
if !nodeExists {
@ -370,22 +373,23 @@ func (asw *actualStateOfWorld) AddVolumeNode(
}
} else {
node.attachedConfirmed = isAttached
klog.V(5).Infof("Volume %q is already added to attachedVolume list to the node %q, the current attach state is %t",
volumeName,
nodeName,
isAttached)
logger.V(5).Info("Volume is already added to attachedVolume list to the node",
"volumeName", volumeName,
"node", klog.KRef("", string(nodeName)),
"currentAttachState", isAttached)
}
volumeObj.nodesAttachedTo[nodeName] = node
asw.attachedVolumes[volumeName] = volumeObj
if isAttached {
asw.addVolumeToReportAsAttached(volumeName, nodeName)
asw.addVolumeToReportAsAttached(logger, volumeName, nodeName)
}
return volumeName, nil
}
func (asw *actualStateOfWorld) SetVolumeMountedByNode(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock()
defer asw.Unlock()
@ -397,21 +401,22 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
nodeObj.mountedByNode = mounted
volumeObj.nodesAttachedTo[nodeName] = nodeObj
klog.V(4).Infof("SetVolumeMountedByNode volume %v to the node %q mounted %t",
volumeName,
nodeName,
mounted)
logger.V(4).Info("SetVolumeMountedByNode volume to the node",
"node", klog.KRef("", string(nodeName)),
"volumeName", volumeName,
"mounted", mounted)
return nil
}
func (asw *actualStateOfWorld) ResetDetachRequestTime(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
volumeObj, nodeObj, err := asw.getNodeAndVolume(volumeName, nodeName)
if err != nil {
klog.Errorf("Failed to ResetDetachRequestTime with error: %v", err)
logger.Error(err, "Failed to ResetDetachRequestTime with error")
return
}
nodeObj.detachRequestedTime = time.Time{}
@ -419,6 +424,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
}
func (asw *actualStateOfWorld) SetDetachRequestTime(
logger klog.Logger,
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock()
defer asw.Unlock()
@ -431,9 +437,9 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
if nodeObj.detachRequestedTime.IsZero() {
nodeObj.detachRequestedTime = time.Now()
volumeObj.nodesAttachedTo[nodeName] = nodeObj
klog.V(4).Infof("Set detach request time to current time for volume %v on node %q",
volumeName,
nodeName)
logger.V(4).Info("Set detach request time to current time for volume on node",
"node", klog.KRef("", string(nodeName)),
"volumeName", volumeName)
}
return time.Since(nodeObj.detachRequestedTime), nil
}
@ -488,10 +494,10 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
// Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
klog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
logger.V(4).Info("Volume is no longer attached to node", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName)
return
}
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
@ -503,7 +509,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
}
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
klog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
logger.V(4).Info("Add new node to nodesToUpdateStatusFor", "node", klog.KRef("", string(nodeName)))
}
_, nodeToUpdateVolumeExists :=
nodeToUpdate.volumesToReportAsAttached[volumeName]
@ -511,7 +517,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
nodeToUpdate.statusUpdateNeeded = true
nodeToUpdate.volumesToReportAsAttached[volumeName] = volumeName
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
klog.V(4).Infof("Report volume %q as attached to node %q", volumeName, nodeName)
logger.V(4).Info("Report volume as attached to node", "node", klog.KRef("", string(nodeName)), "volumeName", volumeName)
}
}
@ -534,11 +540,11 @@ func (asw *actualStateOfWorld) updateNodeStatusUpdateNeeded(nodeName types.NodeN
return nil
}
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName) {
func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(logger klog.Logger, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
if err := asw.updateNodeStatusUpdateNeeded(nodeName, true); err != nil {
klog.Warningf("Failed to update statusUpdateNeeded field in actual state of world: %v", err)
logger.Info("Failed to update statusUpdateNeeded field in actual state of world", "err", err)
}
}
@ -584,8 +590,8 @@ func (asw *actualStateOfWorld) GetAttachState(
}
// InitializeClaimSize sets the size of the volume, but this function should not be used from the attach-detach controller.
func (asw *actualStateOfWorld) InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) {
klog.V(5).Infof("no-op InitializeClaimSize call in attach-detach controller.")
func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) {
logger.V(5).Info("no-op InitializeClaimSize call in attach-detach controller")
}
func (asw *actualStateOfWorld) GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity {
@ -663,7 +669,7 @@ func (asw *actualStateOfWorld) GetNodesForAttachedVolume(volumeName v1.UniqueVol
return nodes
}
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume {
func (asw *actualStateOfWorld) GetVolumesToReportAttached(logger klog.Logger) map[types.NodeName][]v1.AttachedVolume {
asw.Lock()
defer asw.Unlock()
@ -676,14 +682,14 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes")
}
}
return volumesToReportAttached
}
func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(nodeName types.NodeName) (bool, []v1.AttachedVolume) {
func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(logger klog.Logger, nodeName types.NodeName) (bool, []v1.AttachedVolume) {
asw.Lock()
defer asw.Unlock()
@ -700,7 +706,7 @@ func (asw *actualStateOfWorld) GetVolumesToReportAttachedForNode(nodeName types.
// of this node will be updated, so set the flag statusUpdateNeeded to false indicating
// the current status is already updated.
if err := asw.updateNodeStatusUpdateNeeded(nodeName, false); err != nil {
klog.Errorf("Failed to update statusUpdateNeeded field when getting volumes: %v", err)
logger.Error(err, "Failed to update statusUpdateNeeded field when getting volumes")
}
return true, volumesToReportAttached

View File

@ -22,6 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2/ktesting"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
@ -40,7 +41,8 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
devicePath := "fake/device/path"
// Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert
if err != nil {
@ -75,7 +77,8 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T)
devicePath := "fake/device/path"
// Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, false)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, false)
// Assert
if err != nil {
@ -93,7 +96,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T)
}
verifyAttachedVolume(t, allVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached()
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
_, exists := reportAsAttachedVolumesMap[nodeName]
if exists {
t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: <node %q exist> Expect: <node does not exist in the reportedAsAttached map", nodeName)
@ -117,7 +120,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached(t *testing.T)
}
// Add the volume to the node second time with attached set to true
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert
if add2Err != nil {
@ -175,7 +178,8 @@ func Test_AddVolumeNode_Positive_NewVolumeTwoNodesWithFalseAttached(t *testing.T
devicePath := "fake/device/path"
// Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, false)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, false)
// Assert
if err != nil {
@ -187,7 +191,7 @@ func Test_AddVolumeNode_Positive_NewVolumeTwoNodesWithFalseAttached(t *testing.T
t.Fatalf("%q/%q volume/node combo is marked %q, expected 'Uncertain'.", generatedVolumeName, node1Name, volumeNodeComboState)
}
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true)
// Assert
if add2Err != nil {
@ -230,7 +234,7 @@ func Test_AddVolumeNode_Positive_NewVolumeTwoNodesWithFalseAttached(t *testing.T
t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Expect one node returned.")
}
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached()
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
reportedVolumes, exists := reportAsAttachedVolumesMap[node2Name]
if !exists || len(reportedVolumes) != 1 {
t.Fatalf("AddVolumeNode_Positive_NewVolumeNewNodeWithFalseAttached failed. Actual: <node %q exist> Expect: <node does not exist in the reportedAsAttached map", node2Name)
@ -250,8 +254,9 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
devicePath := "fake/device/path"
// Act
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true)
// Assert
if add1Err != nil {
@ -299,8 +304,9 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
devicePath := "fake/device/path"
// Act
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert
if add1Err != nil {
@ -341,7 +347,8 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -398,11 +405,12 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true)
if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
}
generatedVolumeName2, add2Err := asw.AddVolumeNode(volumeName, volumeSpec, node2Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volumeName, volumeSpec, node2Name, devicePath, true)
if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
}
@ -446,7 +454,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -479,7 +488,8 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, node1Name, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, node1Name, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -550,7 +560,8 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -577,14 +588,15 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path"
generatedVolumeName1, add1Err := asw.AddVolumeNode(volume1Name, volume1Spec, node1Name, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, volume1Name, volume1Spec, node1Name, devicePath, true)
if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
}
volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Name, volume2Spec, node2Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volume2Name, volume2Spec, node2Name, devicePath, true)
if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
}
@ -620,12 +632,13 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
if err != nil || plugin == nil {
t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err)
}
generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath, true)
if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
}
node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true)
if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
}
@ -659,7 +672,8 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -686,14 +700,15 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
// Act
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert
if setVolumeMountedErr1 != nil {
@ -722,7 +737,8 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -735,7 +751,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
verifyAttachedVolume(t, attachedVolumes, generatedVolumeName, string(volumeName), nodeName, devicePath, true /* expectedMountedByNode */, false /* expectNonZeroDetachRequestedTime */)
// Act
setVolumeMountedErr := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
setVolumeMountedErr := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert
if setVolumeMountedErr != nil {
@ -762,15 +778,16 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
// Act
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
generatedVolumeName, addErr = asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
generatedVolumeName, addErr = asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert
if setVolumeMountedErr1 != nil {
@ -803,11 +820,12 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
_, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
}
@ -818,8 +836,8 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
expectedDetachRequestedTime := asw.GetAttachedVolumes()[0].DetachRequestedTime
// Act
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert
if setVolumeMountedErr1 != nil {
@ -850,7 +868,8 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) {
devicePath := "fake/device/path"
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -877,13 +896,14 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
// Act
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
_, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
}
@ -913,19 +933,20 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
// Act
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
_, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
}
markDesireToDetachErr := asw.RemoveVolumeFromReportAsAttached(generatedVolumeName, nodeName)
// Reset detach request time to 0
asw.ResetDetachRequestTime(generatedVolumeName, nodeName)
asw.ResetDetachRequestTime(logger, generatedVolumeName, nodeName)
// Assert
if markDesireToDetachErr != nil {
@ -956,12 +977,13 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
setVolumeMountedErr1 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
setVolumeMountedErr2 := asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
if setVolumeMountedErr1 != nil {
t.Fatalf("SetVolumeMountedByNode1 failed. Expected <no error> Actual: <%v>", setVolumeMountedErr1)
}
@ -970,7 +992,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
}
// Act
_, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
_, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
}
@ -999,7 +1021,8 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -1009,7 +1032,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
}
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached()
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
volumes, exists := reportAsAttachedVolumesMap[nodeName]
if !exists {
t.Fatalf("MarkDesireToDetach_UnmarkDesireToDetach failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1032,7 +1055,8 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -1042,7 +1066,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
}
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached()
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
volumes, exists := reportAsAttachedVolumesMap[nodeName]
if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1051,8 +1075,8 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
t.Fatalf("len(reportAsAttachedVolumes) Expected: <0> Actual: <%v>", len(volumes))
}
asw.AddVolumeToReportAsAttached(generatedVolumeName, nodeName)
reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached()
asw.AddVolumeToReportAsAttached(logger, generatedVolumeName, nodeName)
reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached(logger)
volumes, exists = reportAsAttachedVolumesMap[nodeName]
if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive failed. Expected: <node %q exist> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1075,7 +1099,8 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -1085,7 +1110,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
}
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached()
reportAsAttachedVolumesMap := asw.GetVolumesToReportAttached(logger)
volumes, exists := reportAsAttachedVolumesMap[nodeName]
if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: <node %q exists> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1096,9 +1121,9 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
asw.DeleteVolumeNode(generatedVolumeName, nodeName)
asw.AddVolumeNode(volumeName, volumeSpec, nodeName, "" /*device path*/, true)
asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, "" /*device path*/, true)
reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached()
reportAsAttachedVolumesMap = asw.GetVolumesToReportAttached(logger)
volumes, exists = reportAsAttachedVolumesMap[nodeName]
if !exists {
t.Fatalf("Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode failed. Expected: <node %q exists> Actual: <node does not exist in the reportedAsAttached map", nodeName)
@ -1120,13 +1145,14 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
maxWaitTime := 1 * time.Second
etime, err := asw.SetDetachRequestTime(generatedVolumeName, nodeName)
etime, err := asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
}
@ -1135,7 +1161,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
}
// Sleep and call SetDetachRequestTime again
time.Sleep(maxWaitTime)
etime, err = asw.SetDetachRequestTime(generatedVolumeName, nodeName)
etime, err = asw.SetDetachRequestTime(logger, generatedVolumeName, nodeName)
if err != nil {
t.Fatalf("SetDetachRequestTime failed. Expected: <no error> Actual: <%v>", err)
}
@ -1167,7 +1193,8 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name")
devicePath := "fake/device/path"
generatedVolumeName, addErr := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, addErr := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
if addErr != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", addErr)
}
@ -1191,14 +1218,15 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path"
_, add1Err := asw.AddVolumeNode(volume1Name, volume1Spec, node1Name, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
_, add1Err := asw.AddVolumeNode(logger, volume1Name, volume1Spec, node1Name, devicePath, true)
if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
}
volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Name, volume2Spec, node2Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, volume2Name, volume2Spec, node2Name, devicePath, true)
if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
}
@ -1222,6 +1250,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path"
logger, _ := ktesting.NewTestContext(t)
plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err)
@ -1230,12 +1259,12 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
if err != nil || plugin == nil {
t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err)
}
generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath, true)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath, true)
if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
}
node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath, true)
if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
}
@ -1266,6 +1295,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name")
devicePath1 := "fake/device/path1"
logger, _ := ktesting.NewTestContext(t)
plugin, err := volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || plugin == nil {
t.Fatalf("Failed to get volume plugin from spec %v, %v", volumeSpec, err)
@ -1274,13 +1304,13 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
if err != nil || plugin == nil {
t.Fatalf("Failed to get uniqueVolumeName from spec %v, %v", volumeSpec, err)
}
generatedVolumeName1, add1Err := asw.AddVolumeNode(uniqueVolumeName, volumeSpec, node1Name, devicePath1, true)
generatedVolumeName1, add1Err := asw.AddVolumeNode(logger, uniqueVolumeName, volumeSpec, node1Name, devicePath1, true)
if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
}
node2Name := types.NodeName("node2-name")
devicePath2 := "fake/device/path2"
generatedVolumeName2, add2Err := asw.AddVolumeNode(v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath2, true)
generatedVolumeName2, add2Err := asw.AddVolumeNode(logger, v1.UniqueVolumeName(""), volumeSpec, node2Name, devicePath2, true)
if add2Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add2Err)
}
@ -1313,7 +1343,8 @@ func Test_SetNodeStatusUpdateNeededError(t *testing.T) {
nodeName := types.NodeName("node-1")
// Act
asw.SetNodeStatusUpdateNeeded(nodeName)
logger, _ := ktesting.NewTestContext(t)
asw.SetNodeStatusUpdateNeeded(logger, nodeName)
// Assert
nodesToUpdateStatusFor := asw.GetNodesToUpdateStatusFor()
@ -1393,7 +1424,8 @@ func Test_MarkVolumeAsAttached(t *testing.T) {
}
// Act
err = asw.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, devicePath)
// Assert
if err != nil {
@ -1429,7 +1461,8 @@ func Test_MarkVolumeAsUncertain(t *testing.T) {
}
// Act
err = asw.MarkVolumeAsUncertain(volumeName, volumeSpec, nodeName)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsUncertain(logger, volumeName, volumeSpec, nodeName)
// Assert
if err != nil {
@ -1464,14 +1497,15 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) {
devicePath := "fake/device/path"
// Act
generatedVolumeName, err := asw.AddVolumeNode(volumeName, volumeSpec, nodeName, devicePath, true)
logger, _ := ktesting.NewTestContext(t)
generatedVolumeName, err := asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, devicePath, true)
// Assert
if err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
}
needsUpdate, attachedVolumes := asw.GetVolumesToReportAttachedForNode(nodeName)
needsUpdate, attachedVolumes := asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if !needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q does not need an update> Expect: <node exists in the reportedAsAttached map and needs an update", nodeName)
}
@ -1479,7 +1513,7 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) {
t.Fatalf("len(attachedVolumes) Expected: <1> Actual: <%v>", len(attachedVolumes))
}
needsUpdate, _ = asw.GetVolumesToReportAttachedForNode(nodeName)
needsUpdate, _ = asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q needs an update> Expect: <node exists in the reportedAsAttached map and does not need an update", nodeName)
}
@ -1489,7 +1523,7 @@ func Test_GetVolumesToReportAttachedForNode_Positive(t *testing.T) {
t.Fatalf("RemoveVolumeFromReportAsAttached failed. Expected: <no error> Actual: <%v>", removeVolumeDetachErr)
}
needsUpdate, attachedVolumes = asw.GetVolumesToReportAttachedForNode(nodeName)
needsUpdate, attachedVolumes = asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if !needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_Positive_NewVolumeNewNodeWithTrueAttached failed. Actual: <node %q does not need an update> Expect: <node exists in the reportedAsAttached map and needs an update", nodeName)
}
@ -1504,8 +1538,8 @@ func Test_GetVolumesToReportAttachedForNode_UnknownNode(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr)
nodeName := types.NodeName("node-name")
needsUpdate, _ := asw.GetVolumesToReportAttachedForNode(nodeName)
logger, _ := ktesting.NewTestContext(t)
needsUpdate, _ := asw.GetVolumesToReportAttachedForNode(logger, nodeName)
if needsUpdate {
t.Fatalf("GetVolumesToReportAttachedForNode_UnknownNode failed. Actual: <node %q needs an update> Expect: <node does not exist in the reportedAsAttached map and does not need an update", nodeName)
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package metrics
import (
"errors"
"sync"
"k8s.io/apimachinery/pkg/labels"
@ -129,7 +130,7 @@ func (collector *attachDetachStateCollector) DescribeWithStability(ch chan<- *me
}
func (collector *attachDetachStateCollector) CollectWithStability(ch chan<- metrics.Metric) {
nodeVolumeMap := collector.getVolumeInUseCount()
nodeVolumeMap := collector.getVolumeInUseCount(klog.TODO())
for nodeName, pluginCount := range nodeVolumeMap {
for pluginName, count := range pluginCount {
ch <- metrics.NewLazyConstMetric(inUseVolumeMetricDesc,
@ -152,10 +153,10 @@ func (collector *attachDetachStateCollector) CollectWithStability(ch chan<- metr
}
}
func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
func (collector *attachDetachStateCollector) getVolumeInUseCount(logger klog.Logger) volumeCount {
pods, err := collector.podLister.List(labels.Everything())
if err != nil {
klog.Errorf("Error getting pod list")
logger.Error(errors.New("Error getting pod list"), "Get pod list failed")
return nil
}
@ -169,7 +170,7 @@ func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
continue
}
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := util.CreateVolumeSpec(podVolume, pod, types.NodeName(pod.Spec.NodeName), collector.volumePluginMgr, collector.pvcLister, collector.pvLister, collector.csiMigratedPluginManager, collector.intreeToCSITranslator)
volumeSpec, err := util.CreateVolumeSpec(logger, podVolume, pod, types.NodeName(pod.Spec.NodeName), collector.volumePluginMgr, collector.pvcLister, collector.pvLister, collector.csiMigratedPluginManager, collector.intreeToCSITranslator)
if err != nil {
continue
}
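
CollectWithStability implements a metrics interface that carries no context, so the commit passes klog.TODO() at that call site: a placeholder logger that flags the spot for later plumbing, analogous to context.TODO(). A hedged sketch of the idiom (the function and parameter names are illustrative, not from this file):

package metrics

import "k8s.io/klog/v2"

// collectOnce has no context of its own, so it hands klog.TODO() to the
// logger-aware helper; TODO() returns the global logger and marks the
// call site as one that still needs a real logger plumbed through.
func collectOnce(count func(klog.Logger) int) int {
	return count(klog.TODO())
}
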

View File

@ -27,6 +27,7 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@ -121,7 +122,8 @@ func TestVolumesInUseMetricCollection(t *testing.T) {
fakeVolumePluginMgr,
csimigration.NewPluginManager(csiTranslator, utilfeature.DefaultFeatureGate),
csiTranslator)
nodeUseMap := metricCollector.getVolumeInUseCount()
logger, _ := ktesting.NewTestContext(t)
nodeUseMap := metricCollector.getVolumeInUseCount(logger)
if len(nodeUseMap) < 1 {
t.Errorf("Expected one volume in use got %d", len(nodeUseMap))
}
@ -150,7 +152,8 @@ func TestTotalVolumesMetricCollection(t *testing.T) {
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
asw.AddVolumeNode(volumeName, volumeSpec, nodeName, "", true)
logger, _ := ktesting.NewTestContext(t)
asw.AddVolumeNode(logger, volumeName, volumeSpec, nodeName, "", true)
csiTranslator := csitrans.New()
metricCollector := newAttachDetachStateCollector(

View File

@ -19,6 +19,7 @@ limitations under the License.
package populator
import (
"context"
"fmt"
"time"
@ -43,7 +44,7 @@ import (
// each one exists in the desired state of the world cache
// if it has volumes.
type DesiredStateOfWorldPopulator interface {
Run(stopCh <-chan struct{})
Run(ctx context.Context)
}
// NewDesiredStateOfWorldPopulator returns a new instance of DesiredStateOfWorldPopulator.
@ -90,35 +91,36 @@ type desiredStateOfWorldPopulator struct {
intreeToCSITranslator csimigration.InTreeToCSITranslator
}
func (dswp *desiredStateOfWorldPopulator) Run(stopCh <-chan struct{}) {
wait.Until(dswp.populatorLoopFunc(), dswp.loopSleepDuration, stopCh)
func (dswp *desiredStateOfWorldPopulator) Run(ctx context.Context) {
wait.UntilWithContext(ctx, dswp.populatorLoopFunc(ctx), dswp.loopSleepDuration)
}
func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() {
return func() {
dswp.findAndRemoveDeletedPods()
func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc(ctx context.Context) func(ctx context.Context) {
return func(ctx context.Context) {
logger := klog.FromContext(ctx)
dswp.findAndRemoveDeletedPods(logger)
// findAndAddActivePods is called periodically, independently of the main
// populator loop.
if time.Since(dswp.timeOfLastListPods) < dswp.listPodsRetryDuration {
klog.V(5).Infof(
"Skipping findAndAddActivePods(). Not permitted until %v (listPodsRetryDuration %v).",
dswp.timeOfLastListPods.Add(dswp.listPodsRetryDuration),
dswp.listPodsRetryDuration)
logger.V(5).Info(
"Skipping findAndAddActivePods(). Not permitted until the retry time is reached",
"retryTime", dswp.timeOfLastListPods.Add(dswp.listPodsRetryDuration),
"retryDuration", dswp.listPodsRetryDuration)
return
}
dswp.findAndAddActivePods()
dswp.findAndAddActivePods(logger)
}
}
// Iterate through all pods in desired state of world, and remove if they no
// longer exist in the informer
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods(logger klog.Logger) {
for dswPodUID, dswPodToAdd := range dswp.desiredStateOfWorld.GetPodToAdd() {
dswPodKey, err := kcache.MetaNamespaceKeyFunc(dswPodToAdd.Pod)
if err != nil {
klog.Errorf("MetaNamespaceKeyFunc failed for pod %q (UID %q) with: %v", dswPodKey, dswPodUID, err)
logger.Error(err, "MetaNamespaceKeyFunc failed for pod", "podName", dswPodKey, "podUID", dswPodUID)
continue
}
@ -133,7 +135,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
case errors.IsNotFound(err):
// if we can't find the pod, we need to delete it below
case err != nil:
klog.Errorf("podLister Get failed for pod %q (UID %q) with %v", dswPodKey, dswPodUID, err)
logger.Error(err, "podLister Get failed for pod", "podName", dswPodKey, "podUID", dswPodUID)
continue
default:
volumeActionFlag := util.DetermineVolumeAction(
@ -145,7 +147,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
informerPodUID := volutil.GetUniquePodName(informerPod)
// Check whether the unique identifier of the pod from dsw matches the one retrieved from pod informer
if informerPodUID == dswPodUID {
klog.V(10).Infof("Verified pod %q (UID %q) from dsw exists in pod informer.", dswPodKey, dswPodUID)
logger.V(10).Info("Verified podfrom dsw exists in pod informer", "podName", dswPodKey, "podUID", dswPodUID)
continue
}
}
@ -153,7 +155,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
// the pod from dsw does not exist in pod informer, or it does not match the unique identifier retrieved
// from the informer, delete it from dsw
klog.V(1).Infof("Removing pod %q (UID %q) from dsw because it does not exist in pod informer.", dswPodKey, dswPodUID)
logger.V(1).Info("Removing pod from dsw because it does not exist in pod informer", "podName", dswPodKey, "podUID", dswPodUID)
dswp.desiredStateOfWorld.DeletePod(dswPodUID, dswPodToAdd.VolumeName, dswPodToAdd.NodeName)
}
@ -163,21 +165,21 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
// The result is returned from CSIDriverLister which is from local cache. So this is not an expensive call.
volumeAttachable := volutil.IsAttachableVolume(volumeToAttach.VolumeSpec, dswp.volumePluginMgr)
if !volumeAttachable {
klog.Infof("Volume %v changes from attachable to non-attachable.", volumeToAttach.VolumeName)
logger.Info("Volume changes from attachable to non-attachable", "volumeName", volumeToAttach.VolumeName)
for _, scheduledPod := range volumeToAttach.ScheduledPods {
podUID := volutil.GetUniquePodName(scheduledPod)
dswp.desiredStateOfWorld.DeletePod(podUID, volumeToAttach.VolumeName, volumeToAttach.NodeName)
klog.V(4).Infof("Removing podUID: %v, volume: %v on node: %v from desired state of world"+
" because of the change of volume attachability.", podUID, volumeToAttach.VolumeName, volumeToAttach.NodeName)
logger.V(4).Info("Removing podUID and volume on node from desired state of world"+
" because of the change of volume attachability", "node", klog.KRef("", string(volumeToAttach.NodeName)), "podUID", podUID, "volumeName", volumeToAttach.VolumeName)
}
}
}
}
func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() {
func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods(logger klog.Logger) {
pods, err := dswp.podLister.List(labels.Everything())
if err != nil {
klog.Errorf("podLister List failed: %v", err)
logger.Error(err, "PodLister List failed")
return
}
dswp.timeOfLastListPods = time.Now()
@ -187,7 +189,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndAddActivePods() {
// Do not add volumes for terminated pods
continue
}
util.ProcessPodVolumes(pod, true,
util.ProcessPodVolumes(logger, pod, true,
dswp.desiredStateOfWorld, dswp.volumePluginMgr, dswp.pvcLister, dswp.pvLister, dswp.csiMigratedPluginManager, dswp.intreeToCSITranslator)
}
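
The populator conversions above all follow the same structured-logging rule: a printf-style format string becomes a fixed message plus key/value pairs on a logger taken from the context or passed in. A minimal sketch of one such conversion, using a hypothetical helper that is not part of this file:

package populator

import "k8s.io/klog/v2"

// logPodRemoval shows the before/after shape of the conversion.
func logPodRemoval(logger klog.Logger, podKey, podUID string) {
	// before: klog.V(1).Infof("Removing pod %q (UID %q) from dsw ...", podKey, podUID)
	logger.V(1).Info("Removing pod from dsw because it does not exist in pod informer",
		"podName", podKey, "podUID", podUID)
}
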

View File

@ -27,6 +27,7 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/volume/csimigration"
@ -91,8 +92,8 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
//add the given node to the list of nodes managed by dsw
dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/)
dswp.findAndAddActivePods()
logger, _ := ktesting.NewTestContext(t)
dswp.findAndAddActivePods(logger)
expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)
@ -118,7 +119,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
}
//add pod and volume again
dswp.findAndAddActivePods()
dswp.findAndAddActivePods(logger)
//check if the given volume referenced by the pod is added to dsw for the second time
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
@ -130,7 +131,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) {
}
fakePodInformer.Informer().GetStore().Delete(pod)
dswp.findAndRemoveDeletedPods()
dswp.findAndRemoveDeletedPods(logger)
//check if the given volume referenced by the pod still exists in dsw
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))
if volumeExists {
@ -196,8 +197,8 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
//add the given node to the list of nodes managed by dsw
dswp.desiredStateOfWorld.AddNode(k8stypes.NodeName(pod.Spec.NodeName), false /*keepTerminatedPodVolumes*/)
dswp.findAndAddActivePods()
logger, _ := ktesting.NewTestContext(t)
dswp.findAndAddActivePods(logger)
expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)
@ -213,7 +214,7 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
// Change the CSI volume plugin attachability
fakeVolumePlugin.NonAttachable = true
dswp.findAndRemoveDeletedPods()
dswp.findAndRemoveDeletedPods(logger)
// The volume should not exist after it becomes non-attachable
volumeExists = dswp.desiredStateOfWorld.VolumeExists(expectedVolumeName, k8stypes.NodeName(pod.Spec.NodeName))

View File

@ -20,6 +20,7 @@ limitations under the License.
package reconciler
import (
"context"
"fmt"
"strings"
"time"
@ -53,7 +54,7 @@ type Reconciler interface {
// if volumes that should be attached are attached and volumes that should
// be detached are detached. If not, it will trigger attach/detach
// operations to rectify.
Run(stopCh <-chan struct{})
Run(ctx context.Context)
}
// NewReconciler returns a new instance of Reconciler that waits loopPeriod
@ -105,24 +106,24 @@ type reconciler struct {
recorder record.EventRecorder
}
func (rc *reconciler) Run(stopCh <-chan struct{}) {
wait.Until(rc.reconciliationLoopFunc(), rc.loopPeriod, stopCh)
func (rc *reconciler) Run(ctx context.Context) {
wait.UntilWithContext(ctx, rc.reconciliationLoopFunc(ctx), rc.loopPeriod)
}
// reconciliationLoopFunc returns the reconciliation loop; its periodic sync can be disabled via the cli option disableReconciliation.
// It periodically checks whether the attached volumes from actual state
// are still attached to the node and updates the status if they are not.
func (rc *reconciler) reconciliationLoopFunc() func() {
return func() {
rc.reconcile()
func (rc *reconciler) reconciliationLoopFunc(ctx context.Context) func(context.Context) {
return func(ctx context.Context) {
rc.reconcile(ctx)
logger := klog.FromContext(ctx)
if rc.disableReconciliationSync {
klog.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line.")
logger.V(5).Info("Skipping reconciling attached volumes still attached since it is disabled via the command line")
} else if rc.syncDuration < time.Second {
klog.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line.")
logger.V(5).Info("Skipping reconciling attached volumes still attached since it is set to less than one second via the command line")
} else if time.Since(rc.timeOfLastSync) > rc.syncDuration {
klog.V(5).Info("Starting reconciling attached volumes still attached")
logger.V(5).Info("Starting reconciling attached volumes still attached")
rc.sync()
}
}
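
Run now accepts a context.Context instead of a stop channel, and wait.UntilWithContext re-invokes the loop until that context is cancelled. A minimal sketch of the contract, with an illustrative free function standing in for the method:

package reconciler

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// runLoop drives fn every period until ctx is cancelled, mirroring the
// Run(ctx) signature that replaces Run(stopCh) in this diff.
func runLoop(ctx context.Context, period time.Duration, fn func(context.Context)) {
	wait.UntilWithContext(ctx, fn, period)
}
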
@ -164,11 +165,12 @@ func (rc *reconciler) nodeIsHealthy(nodeName types.NodeName) (bool, error) {
return nodeutil.IsNodeReady(node), nil
}
func (rc *reconciler) reconcile() {
func (rc *reconciler) reconcile(ctx context.Context) {
// Detaches are triggered before attaches so that volumes referenced by
// pods that are rescheduled to a different node are detached first.
// Ensure volumes that should be detached are detached.
logger := klog.FromContext(ctx)
for _, attachedVolume := range rc.actualStateOfWorld.GetAttachedVolumes() {
if !rc.desiredStateOfWorld.VolumeExists(
attachedVolume.VolumeName, attachedVolume.NodeName) {
@ -182,12 +184,12 @@ func (rc *reconciler) reconcile() {
// allows multi attach across different nodes.
if util.IsMultiAttachAllowed(attachedVolume.VolumeSpec) {
if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, attachedVolume.NodeName, operationexecutor.DetachOperationName) {
klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff for node %q. Can't start detach", attachedVolume.VolumeName, attachedVolume.NodeName)
logger.V(10).Info("Operation for volume is already running or still in exponential backoff for node. Can't start detach", "node", klog.KRef("", string(attachedVolume.NodeName)), "volumeName", attachedVolume.VolumeName)
continue
}
} else {
if !rc.attacherDetacher.IsOperationSafeToRetry(attachedVolume.VolumeName, "" /* podName */, "" /* nodeName */, operationexecutor.DetachOperationName) {
klog.V(10).Infof("Operation for volume %q is already running or still in exponential backoff in the cluster. Can't start detach for %q", attachedVolume.VolumeName, attachedVolume.NodeName)
logger.V(10).Info("Operation for volume is already running or still in exponential backoff in the cluster. Can't start detach for node", "node", klog.KRef("", string(attachedVolume.NodeName)), "volumeName", attachedVolume.VolumeName)
continue
}
}
@ -201,14 +203,14 @@ func (rc *reconciler) reconcile() {
// See https://github.com/kubernetes/kubernetes/issues/93902
attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
if attachState == cache.AttachStateDetached {
klog.V(5).InfoS("Volume detached--skipping", "volume", attachedVolume)
logger.V(5).Info("Volume detached--skipping", "volume", attachedVolume)
continue
}
// Set the detach request time
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(attachedVolume.VolumeName, attachedVolume.NodeName)
elapsedTime, err := rc.actualStateOfWorld.SetDetachRequestTime(logger, attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
klog.Errorf("Cannot trigger detach because it fails to set detach request time with error %v", err)
logger.Error(err, "Cannot trigger detach because it fails to set detach request time with error")
continue
}
// Check whether timeout has reached the maximum waiting time
@ -216,7 +218,7 @@ func (rc *reconciler) reconcile() {
isHealthy, err := rc.nodeIsHealthy(attachedVolume.NodeName)
if err != nil {
klog.Errorf("failed to get health of node %s: %s", attachedVolume.NodeName, err.Error())
logger.Error(err, "Failed to get health of node", "node", klog.KRef("", string(attachedVolume.NodeName)))
}
// Force detach volumes from unhealthy nodes after maxWaitForUnmountDuration.
@ -224,13 +226,13 @@ func (rc *reconciler) reconcile() {
hasOutOfServiceTaint, err := rc.hasOutOfServiceTaint(attachedVolume.NodeName)
if err != nil {
klog.Errorf("failed to get taint specs for node %s: %s", attachedVolume.NodeName, err.Error())
logger.Error(err, "Failed to get taint specs for node", "node", klog.KRef("", string(attachedVolume.NodeName)))
}
// Check whether volume is still mounted. Skip detach if it is still mounted unless force detach timeout
// or the node has `node.kubernetes.io/out-of-service` taint.
if attachedVolume.MountedByNode && !forceDetach && !hasOutOfServiceTaint {
klog.V(5).InfoS("Cannot detach volume because it is still mounted", "volume", attachedVolume)
logger.V(5).Info("Cannot detach volume because it is still mounted", "volume", attachedVolume)
continue
}
@ -240,77 +242,77 @@ func (rc *reconciler) reconcile() {
// has the correct volume attachment information.
err = rc.actualStateOfWorld.RemoveVolumeFromReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
if err != nil {
klog.V(5).Infof("RemoveVolumeFromReportAsAttached failed while removing volume %q from node %q with: %v",
attachedVolume.VolumeName,
attachedVolume.NodeName,
err)
logger.V(5).Info("RemoveVolumeFromReportAsAttached failed while removing volume from node",
"node", klog.KRef("", string(attachedVolume.NodeName)),
"volumeName", attachedVolume.VolumeName,
"err", err)
}
// Update Node Status to indicate volume is no longer safe to mount.
err = rc.nodeStatusUpdater.UpdateNodeStatusForNode(attachedVolume.NodeName)
err = rc.nodeStatusUpdater.UpdateNodeStatusForNode(logger, attachedVolume.NodeName)
if err != nil {
// Skip detaching this volume if unable to update node status
klog.ErrorS(err, "UpdateNodeStatusForNode failed while attempting to report volume as attached", "volume", attachedVolume)
logger.Error(err, "UpdateNodeStatusForNode failed while attempting to report volume as attached", "volume", attachedVolume)
// Add volume back to ReportAsAttached if UpdateNodeStatusForNode call failed so that node status updater will add it back to VolumeAttached list.
// It is needed here too because DetachVolume is not actually called and we keep the data consistent on every reconcile.
rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
rc.actualStateOfWorld.AddVolumeToReportAsAttached(logger, attachedVolume.VolumeName, attachedVolume.NodeName)
continue
}
// Trigger detach volume which requires verifying safe to detach step
// If timeout is true, skip verifySafeToDetach check
// If the node has node.kubernetes.io/out-of-service taint with NoExecute effect, skip verifySafeToDetach check
klog.V(5).InfoS("Starting attacherDetacher.DetachVolume", "volume", attachedVolume)
logger.V(5).Info("Starting attacherDetacher.DetachVolume", "volume", attachedVolume)
if hasOutOfServiceTaint {
klog.V(4).Infof("node %q has out-of-service taint", attachedVolume.NodeName)
logger.V(4).Info("node has out-of-service taint", "node", klog.KRef("", string(attachedVolume.NodeName)))
}
verifySafeToDetach := !(timeout || hasOutOfServiceTaint)
err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
err = rc.attacherDetacher.DetachVolume(logger, attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
if err == nil {
if !timeout {
klog.InfoS("attacherDetacher.DetachVolume started", "volume", attachedVolume)
logger.Info("attacherDetacher.DetachVolume started", "volume", attachedVolume)
} else {
metrics.RecordForcedDetachMetric()
klog.InfoS("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume)
logger.Info("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume)
}
}
if err != nil {
// Add volume back to ReportAsAttached if DetachVolume call failed so that node status updater will add it back to VolumeAttached list.
// This function is also called while executing the volume detach operation in operation_generator.
// It is needed here too because DetachVolume call might fail before executing the actual operation in operation_executor (e.g., cannot find volume plugin etc.)
rc.actualStateOfWorld.AddVolumeToReportAsAttached(attachedVolume.VolumeName, attachedVolume.NodeName)
rc.actualStateOfWorld.AddVolumeToReportAsAttached(logger, attachedVolume.VolumeName, attachedVolume.NodeName)
if !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
klog.ErrorS(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume)
logger.Error(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume)
}
}
}
}
rc.attachDesiredVolumes()
rc.attachDesiredVolumes(logger)
// Update Node Status
err := rc.nodeStatusUpdater.UpdateNodeStatuses()
err := rc.nodeStatusUpdater.UpdateNodeStatuses(logger)
if err != nil {
klog.Warningf("UpdateNodeStatuses failed with: %v", err)
logger.Info("UpdateNodeStatuses failed", "err", err)
}
}
func (rc *reconciler) attachDesiredVolumes() {
func (rc *reconciler) attachDesiredVolumes(logger klog.Logger) {
// Ensure volumes that should be attached are attached.
for _, volumeToAttach := range rc.desiredStateOfWorld.GetVolumesToAttach() {
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
// Don't even try to start an operation if there is already one running for the given volume and node.
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
klog.V(10).Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
logger.V(10).Info("Operation for volume is already running for node. Can't start attach", "node", klog.KRef("", string(volumeToAttach.NodeName)), "volumeName", volumeToAttach.VolumeName)
continue
}
} else {
// Don't even try to start an operation if there is already one running for the given volume
if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
klog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
logger.V(10).Info("Operation for volume is already running. Can't start attach for node", "node", klog.KRef("", string(volumeToAttach.NodeName)), "volumeNames", volumeToAttach.VolumeName)
continue
}
}
@ -323,8 +325,8 @@ func (rc *reconciler) attachDesiredVolumes() {
attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
if attachState == cache.AttachStateAttached {
// Volume/Node exists, touch it to reset detachRequestedTime
klog.V(10).InfoS("Volume attached--touching", "volume", volumeToAttach)
rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
logger.V(10).Info("Volume attached--touching", "volume", volumeToAttach)
rc.actualStateOfWorld.ResetDetachRequestTime(logger, volumeToAttach.VolumeName, volumeToAttach.NodeName)
continue
}
@ -332,7 +334,7 @@ func (rc *reconciler) attachDesiredVolumes() {
nodes := rc.actualStateOfWorld.GetNodesForAttachedVolume(volumeToAttach.VolumeName)
if len(nodes) > 0 {
if !volumeToAttach.MultiAttachErrorReported {
rc.reportMultiAttachError(volumeToAttach, nodes)
rc.reportMultiAttachError(logger, volumeToAttach, nodes)
rc.desiredStateOfWorld.SetMultiAttachError(volumeToAttach.VolumeName, volumeToAttach.NodeName)
}
continue
@ -340,22 +342,22 @@ func (rc *reconciler) attachDesiredVolumes() {
}
// Volume/Node doesn't exist, spawn a goroutine to attach it
klog.V(5).InfoS("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach)
err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
logger.V(5).Info("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach)
err := rc.attacherDetacher.AttachVolume(logger, volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
if err == nil {
klog.InfoS("attacherDetacher.AttachVolume started", "volume", volumeToAttach)
logger.Info("attacherDetacher.AttachVolume started", "volume", volumeToAttach)
}
if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
// Log all other errors.
klog.ErrorS(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach)
logger.Error(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach)
}
}
}
// reportMultiAttachError sends events and logs the situation where a volume
// that should be attached to a node is already attached to different node(s).
func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) {
func (rc *reconciler) reportMultiAttachError(logger klog.Logger, volumeToAttach cache.VolumeToAttach, nodes []types.NodeName) {
// Filter out the current node from list of nodes where the volume is
// attached.
// Some methods need []string, some other needs []NodeName, collect both.
@ -373,7 +375,6 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
// Get list of pods that use the volume on the other nodes.
pods := rc.desiredStateOfWorld.GetVolumePodsOnNodes(otherNodes, volumeToAttach.VolumeName)
if len(pods) == 0 {
// We did not find any pods that request the volume. The pod must have been deleted already.
simpleMsg, _ := volumeToAttach.GenerateMsg("Multi-Attach error", "Volume is already exclusively attached to one node and can't be attached to another")
@ -381,7 +382,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
}
// Log detailed message to system admin
klog.InfoS("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach)
logger.Info("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach)
return
}
@ -417,5 +418,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
}
// Log all pods for system admin
klog.InfoS("Multi-Attach error: volume is already used by pods", "pods", klog.KObjSlice(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach)
logger.Info("Multi-Attach error: volume is already used by pods", "pods", klog.KObjSlice(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach)
}
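
Several converted lines above log nodes as klog.KRef("", string(nodeName)) rather than interpolating the name into the message; KRef builds a structured object reference, with an empty namespace because nodes are cluster-scoped. A brief sketch (the wrapper function is illustrative):

package reconciler

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

// logNodeTaint mirrors the KRef usage above: the node name becomes a
// structured "node" key instead of a %q verb in the message.
func logNodeTaint(logger klog.Logger, nodeName types.NodeName) {
	logger.V(4).Info("node has out-of-service taint", "node", klog.KRef("", string(nodeName)))
}
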

View File

@ -17,6 +17,7 @@ limitations under the License.
package reconciler
import (
"context"
"testing"
"time"
@ -28,6 +29,8 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/record"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/statusupdater"
@ -40,10 +43,10 @@ import (
)
const (
reconcilerLoopPeriod time.Duration = 10 * time.Millisecond
syncLoopPeriod time.Duration = 100 * time.Minute
maxWaitForUnmountDuration time.Duration = 50 * time.Millisecond
maxLongWaitForUnmountDuration time.Duration = 4200 * time.Second
reconcilerLoopPeriod = 10 * time.Millisecond
syncLoopPeriod = 100 * time.Minute
maxWaitForUnmountDuration = 50 * time.Millisecond
maxLongWaitForUnmountDuration = 4200 * time.Second
)
// Calls Run()
@ -70,9 +73,10 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder)
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 0 /* expectedCallCount */, fakePlugin)
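
The test changes above replace the stop-channel boilerplate (ch := make(chan struct{}), go Run(ch), defer close(ch)) with a cancellable context derived from ktesting, which both stops the reconciler and carries the per-test logger. A condensed sketch of the pattern, assuming a hypothetical helper:

package reconciler

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
)

// startForTest shows the Run(ctx) driving pattern used in these tests:
// cancelling the context on cleanup replaces defer close(stopCh).
func startForTest(t *testing.T, run func(context.Context)) {
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	t.Cleanup(cancel)
	go run(ctx)
}
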
@ -122,9 +126,10 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -175,9 +180,10 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -196,8 +202,8 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
generatedVolumeName,
nodeName)
}
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert
waitForNewDetacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -250,9 +256,10 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -324,9 +331,10 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -345,8 +353,8 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
generatedVolumeName,
nodeName)
}
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName, false /* mounted */)
asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, true /* mounted */)
asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName, false /* mounted */)
// Assert
verifyNewDetacherCallCount(t, true /* expectZeroNewDetacherCallCount */, fakePlugin)
@ -403,9 +411,10 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteMany(t *testing.
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 2 /* expectedCallCount */, fakePlugin)
@ -497,9 +506,10 @@ func Test_Run_OneVolumeAttachAndDetachMultipleNodesWithReadWriteOnce(t *testing.
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -579,9 +589,10 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing
dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Add the pod in which the volume is attached to the uncertain node
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@ -593,11 +604,11 @@ func Test_Run_OneVolumeAttachAndDetachUncertainNodesWithReadWriteOnce(t *testing
// Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// When volume is added to the node, it is set to mounted by default. Then the status will be updated by checking node status VolumeInUse.
// Without this, the delete operation will be delayed due to mounted status
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName1, false /* mounted */)
asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName1, false /* mounted */)
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
@ -629,9 +640,12 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
nodeLister := informerFactory.Core().V1().Nodes().Lister()
nsu := statusupdater.NewFakeNodeStatusUpdater(false /* returnError */)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
rc := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, false, dsw, asw, ad, nsu, nodeLister, fakeRecorder)
reconciliationLoopFunc := rc.(*reconciler).reconciliationLoopFunc()
reconciliationLoopFunc := rc.(*reconciler).reconciliationLoopFunc(ctx)
podName1 := "pod-uid1"
volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
@ -646,22 +660,22 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
}
// Act
reconciliationLoopFunc()
reconciliationLoopFunc(ctx)
// Volume is added to asw, volume should be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// Delete the pod
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
// Mock NodeStatusUpdate fail
rc.(*reconciler).nodeStatusUpdater = statusupdater.NewFakeNodeStatusUpdater(true /* returnError */)
reconciliationLoopFunc()
reconciliationLoopFunc(ctx)
// The first detach will be triggered after at least 50ms (maxWaitForUnmountDuration in test).
time.Sleep(100 * time.Millisecond)
reconciliationLoopFunc()
reconciliationLoopFunc(ctx)
// Right before the detach operation is performed, the volume will first be removed from being reported
// as attached in node status (RemoveVolumeFromReportAsAttached). After the UpdateNodeStatus operation, which is expected to fail,
// the controller adds the volume back as attached.
@ -669,7 +683,7 @@ func Test_Run_UpdateNodeStatusFailBeforeOneVolumeDetachNodeWithReadWriteOnce(t *
// in node status. By calling this function (GetVolumesToReportAttached), node status should be updated, and the volume
// will not need to be updated until new changes are applied (detach is triggered again)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
}
@ -703,9 +717,10 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Add the pod in which the volume is attached to the FailDetachNode
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@ -717,7 +732,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// Volume is added to asw, volume should be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// Delete the pod, but detach will fail
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
@ -732,7 +747,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// will not need to be updated until new changes are applied (detach is triggered again)
time.Sleep(100 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// After the first detach fails, the reconciler will wait for a period of time before retrying.
// The wait time increases exponentially from an initial value of 0.5s (0.5, 1, 2, 4, ...).
@ -740,14 +755,14 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// the first detach operation. At this point, the volume status should not be updated
time.Sleep(100 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw)
verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
// Wait for 600ms to make sure the second detach operation is triggered. Again, the volume will be
// removed from being reported as attached on node status and then added back as attached.
// The volume will be in the list of attached volumes that need to be updated to node status.
time.Sleep(600 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, true, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, true, asw)
// Add a second pod which tries to attach the volume to the same node.
// After adding the pod to the same node, detach will no longer be triggered.
@ -758,7 +773,7 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
// Sleep 1s to verify that no detaches are triggered after the second pod is added.
time.Sleep(1000 * time.Millisecond)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw)
verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
// Add a third pod which tries to attach the volume to a different node.
// At this point, the volume is still attached to the first node. There is no status update for either node.
@ -767,8 +782,8 @@ func Test_Run_OneVolumeDetachFailNodeWithReadWriteOnce(t *testing.T) {
t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
}
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateAttached, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName1, asw)
verifyVolumeNoStatusUpdateNeeded(t, generatedVolumeName, nodeName2, asw)
verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName1, asw)
verifyVolumeNoStatusUpdateNeeded(t, logger, generatedVolumeName, nodeName2, asw)
}
// Creates a volume with accessMode ReadWriteOnce
@ -805,9 +820,10 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T
dsw.AddNode(nodeName2, false /*keepTerminatedPodVolumes*/)
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Add the pod in which the volume is attached to the timeout node
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName1), controllervolumetesting.NewPod(podName1, podName1), volumeSpec, nodeName1)
@ -818,11 +834,11 @@ func Test_Run_OneVolumeAttachAndDetachTimeoutNodesWithReadWriteOnce(t *testing.T
// Volume is added to asw. Because attach operation fails, volume should not be reported as attached to the node.
waitForVolumeAddedToNode(t, generatedVolumeName, nodeName1, asw)
verifyVolumeAttachedToNode(t, generatedVolumeName, nodeName1, cache.AttachStateUncertain, asw)
verifyVolumeReportedAsAttachedToNode(t, generatedVolumeName, nodeName1, false, asw)
verifyVolumeReportedAsAttachedToNode(t, logger, generatedVolumeName, nodeName1, false, asw)
// When a volume is added to the node, it is set to mounted by default. The status is then updated by checking node status VolumesInUse.
// Without this, the delete operation would be delayed due to the mounted status
asw.SetVolumeMountedByNode(generatedVolumeName, nodeName1, false /* mounted */)
asw.SetVolumeMountedByNode(logger, generatedVolumeName, nodeName1, false /* mounted */)
dsw.DeletePod(types.UniquePodName(podName1), generatedVolumeName, nodeName1)
@ -895,9 +911,10 @@ func Test_Run_OneVolumeDetachOnOutOfServiceTaintedNode(t *testing.T) {
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -973,9 +990,10 @@ func Test_Run_OneVolumeDetachOnNoOutOfServiceTaintedNode(t *testing.T) {
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -1057,9 +1075,10 @@ func Test_Run_OneVolumeDetachOnUnhealthyNode(t *testing.T) {
}
// Act
ch := make(chan struct{})
go reconciler.Run(ch)
defer close(ch)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go reconciler.Run(ctx)
// Assert
waitForNewAttacherCallCount(t, 1 /* expectedCallCount */, fakePlugin)
@ -1176,10 +1195,11 @@ func Test_ReportMultiAttachError(t *testing.T) {
}
}
// Act
logger, _ := ktesting.NewTestContext(t)
volumes := dsw.GetVolumesToAttach()
for _, vol := range volumes {
if vol.NodeName == "node1" {
rc.(*reconciler).reportMultiAttachError(vol, nodes)
rc.(*reconciler).reportMultiAttachError(logger, vol, nodes)
}
}
@ -1587,13 +1607,14 @@ func verifyVolumeAttachedToNode(
func verifyVolumeReportedAsAttachedToNode(
t *testing.T,
logger klog.Logger,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName,
isAttached bool,
asw cache.ActualStateOfWorld,
) {
result := false
volumes := asw.GetVolumesToReportAttached()
volumes := asw.GetVolumesToReportAttached(logger)
for _, volume := range volumes[nodeName] {
if volume.Name == volumeName {
result = true
@ -1614,11 +1635,12 @@ func verifyVolumeReportedAsAttachedToNode(
func verifyVolumeNoStatusUpdateNeeded(
t *testing.T,
logger klog.Logger,
volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName,
asw cache.ActualStateOfWorld,
) {
volumes := asw.GetVolumesToReportAttached()
volumes := asw.GetVolumesToReportAttached(logger)
for _, volume := range volumes[nodeName] {
if volume.Name == volumeName {
t.Fatalf("Check volume <%v> is reported as need to update status on node <%v>, expected false",
View File
@ -18,6 +18,8 @@ package statusupdater
import (
"fmt"
"k8s.io/klog/v2"
"k8s.io/apimachinery/pkg/types"
)
@ -31,7 +33,7 @@ type fakeNodeStatusUpdater struct {
returnError bool
}
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error {
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses(logger klog.Logger) error {
if fnsu.returnError {
return fmt.Errorf("fake error on update node status")
}
@ -39,7 +41,7 @@ func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatuses() error {
return nil
}
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatusForNode(nodeName types.NodeName) error {
func (fnsu *fakeNodeStatusUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error {
if fnsu.returnError {
return fmt.Errorf("fake error on update node status")
}
View File
@ -20,14 +20,13 @@ package statusupdater
import (
"fmt"
"k8s.io/klog/v2"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
nodeutil "k8s.io/component-helpers/node/util"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
)
@ -36,9 +35,9 @@ import (
type NodeStatusUpdater interface {
// Gets a list of node statuses that should be updated from the actual state
// of the world and updates them.
UpdateNodeStatuses() error
UpdateNodeStatuses(logger klog.Logger) error
// Update any pending status change for the given node
UpdateNodeStatusForNode(nodeName types.NodeName) error
UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error
}
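Threading the logger through the interface (rather than storing one in the struct) lets each caller pass a logger that already carries its own context, such as the controller name attached via klog.LoggerWithName. A sketch under that assumption, with a trivial updater type invented for illustration:

package statusupdatersketch

import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/klog/v2"
)

type NodeStatusUpdater interface {
	UpdateNodeStatuses(logger klog.Logger) error
	UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error
}

// noopUpdater is a hypothetical implementation used only to show the shape.
type noopUpdater struct{}

func (noopUpdater) UpdateNodeStatuses(logger klog.Logger) error {
	logger.V(4).Info("Nothing to update") // inherits the caller's name and values
	return nil
}

func (noopUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error {
	logger.V(4).Info("Nothing to update", "node", klog.KRef("", string(nodeName)))
	return nil
}

func reconcileOnce(logger klog.Logger, nsu NodeStatusUpdater) error {
	// The caller decides which logger each invocation gets.
	return nsu.UpdateNodeStatuses(klog.LoggerWithName(logger, "statusupdater"))
}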
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
@ -59,13 +58,13 @@ type nodeStatusUpdater struct {
actualStateOfWorld cache.ActualStateOfWorld
}
func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
func (nsu *nodeStatusUpdater) UpdateNodeStatuses(logger klog.Logger) error {
var nodeIssues int
// TODO: investigate right behavior if nodeName is empty
// kubernetes/kubernetes/issues/37777
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()
nodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached(logger)
for nodeName, attachedVolumes := range nodesToUpdate {
err := nsu.processNodeVolumes(nodeName, attachedVolumes)
err := nsu.processNodeVolumes(logger, nodeName, attachedVolumes)
if err != nil {
nodeIssues += 1
}
@ -76,56 +75,50 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
return nil
}
func (nsu *nodeStatusUpdater) UpdateNodeStatusForNode(nodeName types.NodeName) error {
needsUpdate, attachedVolumes := nsu.actualStateOfWorld.GetVolumesToReportAttachedForNode(nodeName)
func (nsu *nodeStatusUpdater) UpdateNodeStatusForNode(logger klog.Logger, nodeName types.NodeName) error {
needsUpdate, attachedVolumes := nsu.actualStateOfWorld.GetVolumesToReportAttachedForNode(logger, nodeName)
if !needsUpdate {
return nil
}
return nsu.processNodeVolumes(nodeName, attachedVolumes)
return nsu.processNodeVolumes(logger, nodeName, attachedVolumes)
}
func (nsu *nodeStatusUpdater) processNodeVolumes(nodeName types.NodeName, attachedVolumes []v1.AttachedVolume) error {
func (nsu *nodeStatusUpdater) processNodeVolumes(logger klog.Logger, nodeName types.NodeName, attachedVolumes []v1.AttachedVolume) error {
nodeObj, err := nsu.nodeLister.Get(string(nodeName))
if errors.IsNotFound(err) {
// If node does not exist, its status cannot be updated.
// Do nothing so that there is no retry until node is created.
klog.V(2).Infof(
"Could not update node status. Failed to find node %q in NodeInformer cache. Error: '%v'",
nodeName,
err)
logger.V(2).Info(
"Could not update node status. Failed to find node in NodeInformer cache", "node", klog.KRef("", string(nodeName)), "err", err)
return nil
} else if err != nil {
// For all other errors, log error and reset flag statusUpdateNeeded
// back to true to indicate this node status needs to be updated again.
klog.V(2).Infof("Error retrieving nodes from node lister. Error: %v", err)
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
logger.V(2).Info("Error retrieving nodes from node lister", "err", err)
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName)
return err
}
err = nsu.updateNodeStatus(nodeName, nodeObj, attachedVolumes)
err = nsu.updateNodeStatus(logger, nodeName, nodeObj, attachedVolumes)
if errors.IsNotFound(err) {
// If node does not exist, its status cannot be updated.
// Do nothing so that there is no retry until node is created.
klog.V(2).Infof(
"Could not update node status for %q; node does not exist - skipping",
nodeName)
logger.V(2).Info(
"Could not update node status, node does not exist - skipping", "node", klog.KObj(nodeObj))
return nil
} else if err != nil {
// If update node status fails, reset flag statusUpdateNeeded back to true
// to indicate this node status needs to be updated again
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)
nsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(logger, nodeName)
klog.V(2).Infof(
"Could not update node status for %q; re-marking for update. %v",
nodeName,
err)
logger.V(2).Info("Could not update node status; re-marking for update", "node", klog.KObj(nodeObj), "err", err)
return err
}
return nil
}
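The rewritten messages above use klog's structured helpers instead of format strings: klog.KObj extracts namespace/name from a live object, while klog.KRef builds the same reference from plain strings when only a name is at hand (as with nodeName before the Node object has been fetched). A small self-contained illustration:

package kobjsketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func logNode(logger klog.Logger, nodeName string) {
	// Before the object exists: reference by name only (empty namespace,
	// because Node is cluster-scoped).
	logger.V(2).Info("Looking up node", "node", klog.KRef("", nodeName))

	// After fetching: same key, derived from the object itself.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}}
	logger.V(2).Info("Found node", "node", klog.KObj(node))
}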
func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error {
func (nsu *nodeStatusUpdater) updateNodeStatus(logger klog.Logger, nodeName types.NodeName, nodeObj *v1.Node, attachedVolumes []v1.AttachedVolume) error {
node := nodeObj.DeepCopy()
node.Status.VolumesAttached = attachedVolumes
_, patchBytes, err := nodeutil.PatchNodeStatus(nsu.kubeClient.CoreV1(), nodeName, nodeObj, node)
@ -133,6 +126,6 @@ func (nsu *nodeStatusUpdater) updateNodeStatus(nodeName types.NodeName, nodeObj
return err
}
klog.V(4).Infof("Updating status %q for node %q succeeded. VolumesAttached: %v", patchBytes, nodeName, attachedVolumes)
logger.V(4).Info("Updating status for node succeeded", "node", klog.KObj(node), "patchBytes", patchBytes, "attachedVolumes", attachedVolumes)
return nil
}
View File
@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -27,6 +28,7 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
@ -37,7 +39,7 @@ import (
// setupNodeStatusUpdate creates all the needed objects for testing.
// The initial environment has 2 nodes with no volumes attached;
// one volume to attach is then added for each node to the actual state of the world.
func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualStateOfWorld, *fake.Clientset, NodeStatusUpdater) {
func setupNodeStatusUpdate(logger klog.Logger, t *testing.T) (cache.ActualStateOfWorld, *fake.Clientset, NodeStatusUpdater) {
testNode1 := corev1.Node{
TypeMeta: metav1.TypeMeta{
Kind: "Node",
@ -83,11 +85,11 @@ func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualState
nodeName2 := types.NodeName("testnode-2")
devicePath := "fake/device/path"
_, err = asw.AddVolumeNode(volumeName1, volumeSpec1, nodeName1, devicePath, true)
_, err = asw.AddVolumeNode(logger, volumeName1, volumeSpec1, nodeName1, devicePath, true)
if err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
}
_, err = asw.AddVolumeNode(volumeName2, volumeSpec2, nodeName2, devicePath, true)
_, err = asw.AddVolumeNode(logger, volumeName2, volumeSpec2, nodeName2, devicePath, true)
if err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", err)
}
@ -101,14 +103,15 @@ func setupNodeStatusUpdate(ctx context.Context, t *testing.T) (cache.ActualState
// checks that each node status.volumesAttached is of length 1 and contains the correct volume
func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) {
ctx := context.Background()
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t)
logger := klog.FromContext(ctx)
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t)
err := nsu.UpdateNodeStatuses()
err := nsu.UpdateNodeStatuses(logger)
if err != nil {
t.Fatalf("UpdateNodeStatuses failed. Expected: <no error> Actual: <%v>", err)
}
needToReport := asw.GetVolumesToReportAttached()
needToReport := asw.GetVolumesToReportAttached(logger)
if len(needToReport) != 0 {
t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <0> Actual: <%v>", len(needToReport))
}
@ -138,7 +141,8 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_TwoNodesUpdate(t *testing.T) {
func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T) {
ctx := context.Background()
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t)
logger := klog.FromContext(ctx)
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t)
var failedNode string
failedOnce := false
@ -153,12 +157,12 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T)
return false, nil, nil
})
err := nsu.UpdateNodeStatuses()
err := nsu.UpdateNodeStatuses(logger)
if errors.Is(err, failureErr) {
t.Fatalf("UpdateNodeStatuses failed. Expected: <test generated error> Actual: <%v>", err)
}
needToReport := asw.GetVolumesToReportAttached()
needToReport := asw.GetVolumesToReportAttached(logger)
if len(needToReport) != 1 {
t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport))
}
@ -194,14 +198,15 @@ func TestNodeStatusUpdater_UpdateNodeStatuses_FailureInFirstUpdate(t *testing.T)
// checks that testnode-1 status.volumesAttached is of length 1 and contains the correct volume
func TestNodeStatusUpdater_UpdateNodeStatusForNode(t *testing.T) {
ctx := context.Background()
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(ctx, t)
logger := klog.FromContext(ctx)
asw, fakeKubeClient, nsu := setupNodeStatusUpdate(logger, t)
err := nsu.UpdateNodeStatusForNode("testnode-1")
err := nsu.UpdateNodeStatusForNode(logger, "testnode-1")
if err != nil {
t.Fatalf("UpdateNodeStatuses failed. Expected: <no error> Actual: <%v>", err)
}
needToReport := asw.GetVolumesToReportAttached()
needToReport := asw.GetVolumesToReportAttached(logger)
if len(needToReport) != 1 {
t.Fatalf("len(asw.GetVolumesToReportAttached()) Expected: <1> Actual: <%v>", len(needToReport))
}
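These tests lean on klog.FromContext's fallback behavior: when no logger has been stored in the context, it returns the global klog logger, so context.Background() is still safe to use. A sketch of both directions, with the logger name chosen arbitrarily:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

func main() {
	ctx := context.Background()
	logger := klog.FromContext(ctx) // nothing stored yet: global fallback
	logger.V(4).Info("using the default logger")

	named := klog.LoggerWithName(logger, "nodestatusupdater-test") // hypothetical name
	ctx = klog.NewContext(ctx, named)
	klog.FromContext(ctx).Info("retrieved the named logger from the context")
}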
View File
@ -376,7 +376,7 @@ func (plugin *TestPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock()
if spec == nil {
klog.Errorf("GetVolumeName called with nil volume spec")
klog.ErrorS(nil, "GetVolumeName called with nil volume spec")
plugin.ErrorEncountered = true
return "", fmt.Errorf("GetVolumeName called with nil volume spec")
}
@ -400,7 +400,7 @@ func (plugin *TestPlugin) CanSupport(spec *volume.Spec) bool {
plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock()
if spec == nil {
klog.Errorf("CanSupport called with nil volume spec")
klog.ErrorS(nil, "CanSupport called with nil volume spec")
plugin.ErrorEncountered = true
}
return true
@ -414,7 +414,7 @@ func (plugin *TestPlugin) NewMounter(spec *volume.Spec, podRef *v1.Pod, opts vol
plugin.pluginLock.Lock()
defer plugin.pluginLock.Unlock()
if spec == nil {
klog.Errorf("NewMounter called with nil volume spec")
klog.ErrorS(nil, "NewMounter called with nil volume spec")
plugin.ErrorEncountered = true
}
return nil, nil
@ -540,7 +540,7 @@ func (attacher *testPluginAttacher) Attach(spec *volume.Spec, nodeName types.Nod
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
klog.Errorf("Attach called with nil volume spec")
klog.ErrorS(nil, "Attach called with nil volume spec")
return "", fmt.Errorf("Attach called with nil volume spec")
}
attacher.attachedVolumeMap[string(nodeName)] = append(attacher.attachedVolumeMap[string(nodeName)], spec.Name())
@ -556,7 +556,7 @@ func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
klog.Errorf("WaitForAttach called with nil volume spec")
klog.ErrorS(nil, "WaitForAttach called with nil volume spec")
return "", fmt.Errorf("WaitForAttach called with nil volume spec")
}
fakePath := fmt.Sprintf("%s/%s", devicePath, spec.Name())
@ -568,7 +568,7 @@ func (attacher *testPluginAttacher) GetDeviceMountPath(spec *volume.Spec) (strin
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
klog.Errorf("GetDeviceMountPath called with nil volume spec")
klog.ErrorS(nil, "GetDeviceMountPath called with nil volume spec")
return "", fmt.Errorf("GetDeviceMountPath called with nil volume spec")
}
return "", nil
@ -579,7 +579,7 @@ func (attacher *testPluginAttacher) MountDevice(spec *volume.Spec, devicePath st
defer attacher.pluginLock.Unlock()
if spec == nil {
*attacher.ErrorEncountered = true
klog.Errorf("MountDevice called with nil volume spec")
klog.ErrorS(nil, "MountDevice called with nil volume spec")
return fmt.Errorf("MountDevice called with nil volume spec")
}
return nil
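klog.ErrorS deliberately accepts a nil error, which is what the test plugin relies on above: the message is still emitted at error severity, it simply carries no err value. A minimal sketch:

package main

import "k8s.io/klog/v2"

func main() {
	defer klog.Flush()
	// Error-level log with no underlying error object.
	klog.ErrorS(nil, "GetVolumeName called with nil volume spec")
}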
View File
@ -38,7 +38,7 @@ import (
// A volume.Spec that refers to an in-tree plugin spec is translated to refer
// to a migrated CSI plugin spec if all conditions for CSI migration on a node
// for the in-tree plugin is satisfied.
func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) {
func CreateVolumeSpec(logger klog.Logger, podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) {
claimName := ""
readOnly := false
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
@ -50,10 +50,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
claimName = ephemeral.VolumeClaimName(pod, &podVolume)
}
if claimName != "" {
klog.V(10).Infof(
"Found PVC, ClaimName: %q/%q",
pod.Namespace,
claimName)
logger.V(10).Info("Found PVC", "PVC", klog.KRef(pod.Namespace, claimName))
// If podVolume is a PVC, fetch the real PV behind the claim
pvc, err := getPVCFromCache(pod.Namespace, claimName, pvcLister)
@ -71,12 +68,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
}
pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
klog.V(10).Infof(
"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
pod.Namespace,
claimName,
pvcUID,
pvName)
logger.V(10).Info("Found bound PV for PVC", "PVC", klog.KRef(pod.Namespace, claimName), "pvcUID", pvcUID, "PV", klog.KRef("", pvName))
// Fetch actual PV object
volumeSpec, err := getPVSpecFromCache(
@ -98,13 +90,7 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
err)
}
klog.V(10).Infof(
"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
volumeSpec.Name(),
pvName,
pod.Namespace,
claimName,
pvcUID)
logger.V(10).Info("Extracted volumeSpec from bound PV and PVC", "PVC", klog.KRef(pod.Namespace, claimName), "pvcUID", pvcUID, "PV", klog.KRef("", pvName), "volumeSpecName", volumeSpec.Name())
return volumeSpec, nil
}
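The V(10) calls above are essentially free at normal verbosity: V returns a disabled logger whose Info is a no-op, so the key/value arguments only matter when -v=10 is active. When building an argument is itself expensive, the enabled check can be made explicit; a sketch, with the expensiveSummary helper invented for illustration:

package verbositysketch

import "k8s.io/klog/v2"

// expensiveSummary stands in for work that should only happen at high verbosity.
func expensiveSummary() string {
	return "detailed state dump"
}

func logVerbose(logger klog.Logger) {
	if loggerV := logger.V(10); loggerV.Enabled() {
		loggerV.Info("Detailed state", "summary", expensiveSummary())
	}
}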
@ -199,59 +185,39 @@ func DetermineVolumeAction(pod *v1.Pod, desiredStateOfWorld cache.DesiredStateOf
// ProcessPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world if addVolumes is true, otherwise it removes them.
func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.DesiredStateOfWorld, volumePluginMgr *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) {
func ProcessPodVolumes(logger klog.Logger, pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.DesiredStateOfWorld, volumePluginMgr *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) {
if pod == nil {
return
}
if len(pod.Spec.Volumes) <= 0 {
klog.V(10).Infof("Skipping processing of pod %q/%q: it has no volumes.",
pod.Namespace,
pod.Name)
logger.V(10).Info("Skipping processing of pod, it has no volumes", "pod", klog.KObj(pod))
return
}
nodeName := types.NodeName(pod.Spec.NodeName)
if nodeName == "" {
klog.V(10).Infof(
"Skipping processing of pod %q/%q: it is not scheduled to a node.",
pod.Namespace,
pod.Name)
logger.V(10).Info("Skipping processing of pod, it is not scheduled to a node", "pod", klog.KObj(pod))
return
} else if !desiredStateOfWorld.NodeExists(nodeName) {
// If the node the pod is scheduled to does not exist in the desired
// state of the world data structure, that indicates the node is not
// yet managed by the controller. Therefore, ignore the pod.
klog.V(4).Infof(
"Skipping processing of pod %q/%q: it is scheduled to node %q which is not managed by the controller.",
pod.Namespace,
pod.Name,
nodeName)
logger.V(4).Info("Skipping processing of pod, it is scheduled to node which is not managed by the controller", "node", klog.KRef("", string(nodeName)), "pod", klog.KObj(pod))
return
}
// Process volume spec for each volume defined in pod
for _, podVolume := range pod.Spec.Volumes {
volumeSpec, err := CreateVolumeSpec(podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator)
volumeSpec, err := CreateVolumeSpec(logger, podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator)
if err != nil {
klog.V(10).Infof(
"Error processing volume %q for pod %q/%q: %v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
logger.V(10).Info("Error processing volume for pod", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
continue
}
attachableVolumePlugin, err :=
volumePluginMgr.FindAttachablePluginBySpec(volumeSpec)
if err != nil || attachableVolumePlugin == nil {
klog.V(10).Infof(
"Skipping volume %q for pod %q/%q: it does not implement attacher interface. err=%v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
logger.V(10).Info("Skipping volume for pod, it does not implement attacher interface", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
continue
}
@ -261,12 +227,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
_, err := desiredStateOfWorld.AddPod(
uniquePodName, pod, volumeSpec, nodeName)
if err != nil {
klog.V(10).Infof(
"Failed to add volume %q for pod %q/%q to desiredStateOfWorld. %v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
logger.V(10).Info("Failed to add volume for pod to desiredStateOfWorld", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
}
} else {
@ -274,12 +235,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D
uniqueVolumeName, err := util.GetUniqueVolumeNameFromSpec(
attachableVolumePlugin, volumeSpec)
if err != nil {
klog.V(10).Infof(
"Failed to delete volume %q for pod %q/%q from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed with %v",
podVolume.Name,
pod.Namespace,
pod.Name,
err)
logger.V(10).Info("Failed to delete volume for pod from desiredStateOfWorld. GetUniqueVolumeNameFromSpec failed", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "err", err)
continue
}
desiredStateOfWorld.DeletePod(
View File
@ -30,6 +30,7 @@ import (
kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/fc"
@ -241,8 +242,9 @@ func Test_CreateVolumeSpec(t *testing.T) {
},
} {
t.Run(test.desc, func(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
plugMgr, intreeToCSITranslator, csiTranslator, pvLister, pvcLister := setup(testNodeName, t)
actualSpec, err := CreateVolumeSpec(test.pod.Spec.Volumes[0], test.pod, test.createNodeName, plugMgr, pvcLister, pvLister, intreeToCSITranslator, csiTranslator)
actualSpec, err := CreateVolumeSpec(logger, test.pod.Spec.Volumes[0], test.pod, test.createNodeName, plugMgr, pvcLister, pvLister, intreeToCSITranslator, csiTranslator)
if actualSpec == nil && (test.wantPersistentVolume != nil || test.wantVolume != nil) {
t.Errorf("got volume spec is nil")
View File
@ -165,9 +165,9 @@ func (ec *ephemeralController) onPVCDelete(obj interface{}) {
func (ec *ephemeralController) Run(ctx context.Context, workers int) {
defer runtime.HandleCrash()
defer ec.queue.ShutDown()
klog.Infof("Starting ephemeral volume controller")
defer klog.Infof("Shutting down ephemeral volume controller")
logger := klog.FromContext(ctx)
logger.Info("Starting ephemeral volume controller")
defer logger.Info("Shutting down ephemeral volume controller")
if !cache.WaitForNamedCacheSync("ephemeral", ctx.Done(), ec.podSynced, ec.pvcsSynced) {
return
@ -212,18 +212,19 @@ func (ec *ephemeralController) syncHandler(ctx context.Context, key string) erro
return err
}
pod, err := ec.podLister.Pods(namespace).Get(name)
logger := klog.FromContext(ctx)
if err != nil {
if errors.IsNotFound(err) {
klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is gone", key)
logger.V(5).Info("Ephemeral: nothing to do for pod, it is gone", "podKey", key)
return nil
}
klog.V(5).Infof("Error getting pod %s/%s (uid: %q) from informer : %v", pod.Namespace, pod.Name, pod.UID, err)
logger.V(5).Info("Error getting pod from informer", "pod", klog.KObj(pod), "podUID", pod.UID, "err", err)
return err
}
// Ignore pods which are already getting deleted.
if pod.DeletionTimestamp != nil {
klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is marked for deletion", key)
logger.V(5).Info("Ephemeral: nothing to do for pod, it is marked for deletion", "podKey", key)
return nil
}
@ -239,7 +240,8 @@ func (ec *ephemeralController) syncHandler(ctx context.Context, key string) erro
// handleEphemeralVolume is invoked for each volume of a pod.
func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vol v1.Volume) error {
klog.V(5).Infof("ephemeral: checking volume %s", vol.Name)
logger := klog.FromContext(ctx)
logger.V(5).Info("Ephemeral: checking volume", "volumeName", vol.Name)
if vol.Ephemeral == nil {
return nil
}
@ -254,7 +256,7 @@ func (ec *ephemeralController) handleVolume(ctx context.Context, pod *v1.Pod, vo
return err
}
// Already created, nothing more to do.
klog.V(5).Infof("ephemeral: volume %s: PVC %s already created", vol.Name, pvcName)
logger.V(5).Info("Ephemeral: PVC already created", "volumeName", vol.Name, "PVC", klog.KObj(pvc))
return nil
}
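The controller pulls the logger out of the ctx once at the top of each handler and then logs with key/value pairs, so whatever the caller attached (the "ephemeral-volume-controller" name, for instance) appears on every line. A standalone sketch of that shape, with hypothetical names:

package main

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

func syncHandler(ctx context.Context, key string) error {
	logger := klog.FromContext(ctx) // carries whatever the caller attached
	logger.V(5).Info("Syncing", "podKey", key)
	if key == "" {
		return fmt.Errorf("empty key")
	}
	return nil
}

func main() {
	logger := klog.LoggerWithName(klog.Background(), "ephemeral-volume-controller")
	ctx := klog.NewContext(context.Background(), logger)
	_ = syncHandler(ctx, "default/my-pod")
}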
View File
@ -219,20 +219,21 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
if errors.IsNotFound(err) {
return nil
}
logger := klog.FromContext(ctx)
if err != nil {
klog.V(5).Infof("Error getting PVC %q from informer : %v", key, err)
logger.V(5).Info("Error getting PVC from informer", "pvcKey", key, "err", err)
return err
}
pv, err := expc.getPersistentVolume(ctx, pvc)
if err != nil {
klog.V(5).Infof("Error getting Persistent Volume for PVC %q (uid: %q) from informer : %v", key, pvc.UID, err)
logger.V(5).Info("Error getting Persistent Volume for PVC from informer", "pvcKey", key, "pvcUID", pvc.UID, "err", err)
return err
}
if pv.Spec.ClaimRef == nil || pvc.Namespace != pv.Spec.ClaimRef.Namespace || pvc.UID != pv.Spec.ClaimRef.UID {
err := fmt.Errorf("persistent Volume is not bound to PVC being updated : %s", key)
klog.V(4).Infof("%v", err)
logger.V(4).Info("", "err", err)
return err
}
@ -249,14 +250,14 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
volumeSpec := volume.NewSpecFromPersistentVolume(pv, false)
migratable, err := expc.csiMigratedPluginManager.IsMigratable(volumeSpec)
if err != nil {
klog.V(4).Infof("failed to check CSI migration status for PVC: %s with error: %v", key, err)
logger.V(4).Info("Failed to check CSI migration status for PVC with error", "pvcKey", key, "err", err)
return nil
}
// handle CSI migration scenarios before invoking FindExpandablePluginBySpec for in-tree
if migratable {
inTreePluginName, err := expc.csiMigratedPluginManager.GetInTreePluginNameFromSpec(volumeSpec.PersistentVolume, volumeSpec.Volume)
if err != nil {
klog.V(4).Infof("Error getting in-tree plugin name from persistent volume %s: %v", volumeSpec.PersistentVolume.Name, err)
logger.V(4).Info("Error getting in-tree plugin name from persistent volume", "volumeName", volumeSpec.PersistentVolume.Name, "err", err)
return err
}
@ -286,46 +287,45 @@ func (expc *expandController) syncHandler(ctx context.Context, key string) error
eventType = v1.EventTypeWarning
}
expc.recorder.Event(pvc, eventType, events.ExternalExpanding, msg)
klog.Infof("waiting for an external controller to expand the PVC %q (uid: %q)", key, pvc.UID)
logger.Info("Waiting for an external controller to expand the PVC", "pvcKey", key, "pvcUID", pvc.UID)
// If we are expecting that an external plugin will handle resizing this volume then
// there is no point in requeuing this PVC.
return nil
}
volumeResizerName := volumePlugin.GetPluginName()
return expc.expand(pvc, pv, volumeResizerName)
return expc.expand(logger, pvc, pv, volumeResizerName)
}
func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, resizerName string) error {
func (expc *expandController) expand(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, resizerName string) error {
// if node expand is complete and pv's annotation can be removed, remove the annotation from pv and return
if expc.isNodeExpandComplete(pvc, pv) && metav1.HasAnnotation(pv.ObjectMeta, util.AnnPreResizeCapacity) {
if expc.isNodeExpandComplete(logger, pvc, pv) && metav1.HasAnnotation(pv.ObjectMeta, util.AnnPreResizeCapacity) {
return util.DeleteAnnPreResizeCapacity(pv, expc.GetKubeClient())
}
var generatedOptions volumetypes.GeneratedOperations
var err error
if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
generatedOptions, err = expc.operationGenerator.GenerateExpandAndRecoverVolumeFunc(pvc, pv, resizerName)
if err != nil {
klog.Errorf("Error starting ExpandVolume for pvc %s with %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
logger.Error(err, "Error starting ExpandVolume for pvc", "PVC", klog.KObj(pvc))
return err
}
} else {
pvc, err := util.MarkResizeInProgressWithResizer(pvc, resizerName, expc.kubeClient)
if err != nil {
klog.Errorf("Error setting PVC %s in progress with error : %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
logger.Error(err, "Error setting PVC in progress with error", "PVC", klog.KObj(pvc), "err", err)
return err
}
generatedOptions, err = expc.operationGenerator.GenerateExpandVolumeFunc(pvc, pv)
if err != nil {
klog.Errorf("Error starting ExpandVolume for pvc %s with %v", util.GetPersistentVolumeClaimQualifiedName(pvc), err)
logger.Error(err, "Error starting ExpandVolume for pvc with error", "PVC", klog.KObj(pvc), "err", err)
return err
}
}
klog.V(5).Infof("Starting ExpandVolume for volume %s", util.GetPersistentVolumeClaimQualifiedName(pvc))
logger.V(5).Info("Starting ExpandVolume for volume", "volumeName", util.GetPersistentVolumeClaimQualifiedName(pvc))
_, detailedErr := generatedOptions.Run()
return detailedErr
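A note on the calls above: logger.Error records its first argument under the err key automatically, while logger.Info has no error slot and must receive err as an ordinary key/value pair. A self-contained sketch of the two forms:

package expandsketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

func logExpandFailure(logger klog.Logger, pvc *v1.PersistentVolumeClaim, err error) {
	// Error severity: err is the first argument, not repeated as a pair.
	logger.Error(err, "Error starting ExpandVolume for PVC", "PVC", klog.KObj(pvc))
	// Info severity: err travels as an explicit key/value pair.
	logger.V(5).Info("Retrying expand later", "PVC", klog.KObj(pvc), "err", err)
}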
@ -335,9 +335,9 @@ func (expc *expandController) expand(pvc *v1.PersistentVolumeClaim, pv *v1.Persi
func (expc *expandController) Run(ctx context.Context) {
defer runtime.HandleCrash()
defer expc.queue.ShutDown()
klog.Infof("Starting expand controller")
defer klog.Infof("Shutting down expand controller")
logger := klog.FromContext(ctx)
logger.Info("Starting expand controller")
defer logger.Info("Shutting down expand controller")
if !cache.WaitForNamedCacheSync("expand", ctx.Done(), expc.pvcsSynced, expc.pvSynced) {
return
@ -367,8 +367,8 @@ func (expc *expandController) getPersistentVolume(ctx context.Context, pvc *v1.P
}
// isNodeExpandComplete returns true if pvc.Status.Capacity >= pv.Spec.Capacity
func (expc *expandController) isNodeExpandComplete(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool {
klog.V(4).Infof("pv %q capacity = %v, pvc %s capacity = %v", pv.Name, pv.Spec.Capacity[v1.ResourceStorage], pvc.ObjectMeta.Name, pvc.Status.Capacity[v1.ResourceStorage])
func (expc *expandController) isNodeExpandComplete(logger klog.Logger, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) bool {
logger.V(4).Info("pv and pvc capacity", "PV", klog.KObj(pv), "pvCapacity", pv.Spec.Capacity[v1.ResourceStorage], "PVC", klog.KObj(pvc), "pvcCapacity", pvc.Status.Capacity[v1.ResourceStorage])
pvcSpecCap := pvc.Spec.Resources.Requests.Storage()
pvcStatusCap, pvCap := pvc.Status.Capacity[v1.ResourceStorage], pv.Spec.Capacity[v1.ResourceStorage]
@ -469,7 +469,7 @@ func (expc *expandController) GetServiceAccountTokenFunc() func(_, _ string, _ *
func (expc *expandController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in expandController")
klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in expandController")
}
}
View File
@ -23,6 +23,7 @@ import (
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2/ktesting"
)
// Test single call to syncClaim and syncVolume methods.
@ -749,8 +750,8 @@ func TestSync(t *testing.T) {
test: testSyncClaim,
},
}
runSyncTests(t, tests, []*storage.StorageClass{
_, ctx := ktesting.NewTestContext(t)
runSyncTests(t, ctx, tests, []*storage.StorageClass{
{
ObjectMeta: metav1.ObjectMeta{Name: classWait},
VolumeBindingMode: &modeWait,
@ -964,8 +965,8 @@ func TestSyncBlockVolume(t *testing.T) {
test: testSyncVolume,
},
}
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
_, ctx := ktesting.NewTestContext(t)
runSyncTests(t, ctx, tests, []*storage.StorageClass{}, []*v1.Pod{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -1016,6 +1017,6 @@ func TestMultiSync(t *testing.T) {
test: testSyncClaim,
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
_, ctx := ktesting.NewTestContext(t)
runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "")
}
View File
@ -18,15 +18,16 @@ package persistentvolume
import (
"errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
"testing"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2/ktesting"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
"k8s.io/kubernetes/pkg/features"
)
// Test single call to syncVolume, expecting recycling to happen.
@ -37,6 +38,7 @@ func TestDeleteSync(t *testing.T) {
const gceDriver = "pd.csi.storage.gke.io"
// Default enable the HonorPVReclaimPolicy feature gate.
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)()
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{
{
// delete volume bound by controller
@ -106,7 +108,7 @@ func TestDeleteSync(t *testing.T) {
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Delete the volume before delete operation starts
reactor.DeleteVolume("volume8-6")
}),
@ -122,7 +124,7 @@ func TestDeleteSync(t *testing.T) {
expectedClaims: newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil),
expectedEvents: noevents,
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Bind the volume to resurrected claim (this should never
// happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound, nil)
@ -217,7 +219,7 @@ func TestDeleteSync(t *testing.T) {
test: wrapTestWithReclaimCalls(operationDelete, []error{nil}, testSyncVolume),
},
}
runSyncTests(t, tests, []*storage.StorageClass{}, []*v1.Pod{})
runSyncTests(t, ctx, tests, []*storage.StorageClass{}, []*v1.Pod{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -250,6 +252,6 @@ func TestDeleteMultiSync(t *testing.T) {
test: wrapTestWithReclaimCalls(operationDelete, []error{errors.New("Mock delete error"), nil}, testSyncVolume),
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
_, ctx := ktesting.NewTestContext(t)
runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "")
}
View File
@ -117,9 +117,9 @@ type volumeReactor struct {
ctrl *PersistentVolumeController
}
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []pvtesting.ReactorError) *volumeReactor {
func newVolumeReactor(ctx context.Context, client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []pvtesting.ReactorError) *volumeReactor {
return &volumeReactor{
pvtesting.NewVolumeReactor(client, fakeVolumeWatch, fakeClaimWatch, errors),
pvtesting.NewVolumeReactor(ctx, client, fakeVolumeWatch, fakeClaimWatch, errors),
ctrl,
}
}
@ -170,14 +170,14 @@ func (r *volumeReactor) waitTest(test controllerTest) error {
// checkEvents compares all expectedEvents with events generated during the test
// and reports differences.
func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeController) error {
func checkEvents(t *testing.T, ctx context.Context, expectedEvents []string, ctrl *PersistentVolumeController) error {
var err error
// Read recorded events - wait up to 1 minute to get all the expected ones
// (just in case some goroutines are slow to write)
timer := time.NewTimer(time.Minute)
defer timer.Stop()
logger := klog.FromContext(ctx)
fakeRecorder := ctrl.eventRecorder.(*record.FakeRecorder)
gotEvents := []string{}
finished := false
@ -185,14 +185,14 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
select {
case event, ok := <-fakeRecorder.Events:
if ok {
klog.V(5).Infof("event recorder got event %s", event)
logger.V(5).Info("Event recorder got event", "event", event)
gotEvents = append(gotEvents, event)
} else {
klog.V(5).Infof("event recorder finished")
logger.V(5).Info("Event recorder finished")
finished = true
}
case _, _ = <-timer.C:
klog.V(5).Infof("event recorder timeout")
logger.V(5).Info("Event recorder timeout")
finished = true
}
}
@ -219,7 +219,7 @@ func checkEvents(t *testing.T, expectedEvents []string, ctrl *PersistentVolumeCo
func alwaysReady() bool { return true }
func newTestController(kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) (*PersistentVolumeController, error) {
func newTestController(ctx context.Context, kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) (*PersistentVolumeController, error) {
if informerFactory == nil {
informerFactory = informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
}
@ -235,7 +235,7 @@ func newTestController(kubeClient clientset.Interface, informerFactory informers
EventRecorder: record.NewFakeRecorder(1000),
EnableDynamicProvisioning: enableDynamicProvisioning,
}
ctrl, err := NewController(params)
ctrl, err := NewController(ctx, params)
if err != nil {
return nil, fmt.Errorf("failed to construct persistentvolume controller: %v", err)
}
@ -586,18 +586,18 @@ const operationDelete = "Delete"
const operationRecycle = "Recycle"
var (
classGold string = "gold"
classSilver string = "silver"
classCopper string = "copper"
classEmpty string = ""
classNonExisting string = "non-existing"
classExternal string = "external"
classExternalWait string = "external-wait"
classUnknownInternal string = "unknown-internal"
classUnsupportedMountOptions string = "unsupported-mountoptions"
classLarge string = "large"
classWait string = "wait"
classCSI string = "csi"
classGold = "gold"
classSilver = "silver"
classCopper = "copper"
classEmpty = ""
classNonExisting = "non-existing"
classExternal = "external"
classExternalWait = "external-wait"
classUnknownInternal = "unknown-internal"
classUnsupportedMountOptions = "unsupported-mountoptions"
classLarge = "large"
classWait = "wait"
classCSI = "csi"
modeWait = storage.VolumeBindingWaitForFirstConsumer
)
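The reworked var block above drops the explicit string type because Go infers it from the initializer; both declarations below are equivalent, the first just carries redundant information:

package classsketch

var classGoldExplicit string = "gold" // explicit type, redundant with the literal
var classWaitInferred = "wait"        // inferred as string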
@ -670,13 +670,13 @@ func wrapTestWithCSIMigrationProvisionCalls(toWrap testCall) testCall {
// injected function to simulate that something is happening when the
// controller waits for the operation lock. The controller is then resumed and we
// check how it behaves.
func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall {
func wrapTestWithInjectedOperation(ctx context.Context, toWrap testCall, injectBeforeOperation func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor)) testCall {
return func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest) error {
// Inject a hook before async operation starts
ctrl.preOperationHook = func(operationName string) {
// Inside the hook, run the function to inject
klog.V(4).Infof("reactor: scheduleOperation reached, injecting call")
klog.FromContext(ctx).V(4).Info("Reactor: scheduleOperation reached, injecting call")
injectBeforeOperation(ctrl, reactor)
}
@ -700,7 +700,7 @@ func wrapTestWithInjectedOperation(toWrap testCall, injectBeforeOperation func(c
}
}
func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest, t *testing.T) {
func evaluateTestResults(ctx context.Context, ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor, test controllerTest, t *testing.T) {
// Evaluate results
if err := reactor.CheckClaims(test.expectedClaims); err != nil {
t.Errorf("Test %q: %v", test.name, err)
@ -710,7 +710,7 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo
t.Errorf("Test %q: %v", test.name, err)
}
if err := checkEvents(t, test.expectedEvents, ctrl); err != nil {
if err := checkEvents(t, ctx, test.expectedEvents, ctrl); err != nil {
t.Errorf("Test %q: %v", test.name, err)
}
}
@ -721,15 +721,15 @@ func evaluateTestResults(ctrl *PersistentVolumeController, reactor *pvtesting.Vo
// 2. Call the tested function (syncClaim/syncVolume) via
// controllerTest.testCall *once*.
// 3. Compare resulting volumes and claims with expected volumes and claims.
func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
func runSyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, pods []*v1.Pod) {
doit := func(t *testing.T, test controllerTest) {
// Initialize the controller
client := &fake.Clientset{}
ctrl, err := newTestController(client, nil, true)
ctrl, err := newTestController(ctx, client, nil, true)
if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
}
reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
reactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)
for _, claim := range test.initialClaims {
if metav1.HasAnnotation(claim.ObjectMeta, annSkipLocalStore) {
continue
@ -771,7 +771,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
t.Errorf("Test %q failed: %v", test.name, err)
}
evaluateTestResults(ctrl, reactor.VolumeReactor, test, t)
evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
}
for _, test := range tests {
@ -797,13 +797,14 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
// of volumes/claims with expected claims/volumes and report differences.
//
// Some limit of calls is enforced to prevent endless loops.
func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
func runMultisyncTests(t *testing.T, ctx context.Context, tests []controllerTest, storageClasses []*storage.StorageClass, defaultStorageClass string) {
logger := klog.FromContext(ctx)
run := func(t *testing.T, test controllerTest) {
klog.V(4).Infof("starting multisync test %q", test.name)
logger.V(4).Info("Starting multisync test", "testName", test.name)
// Initialize the controller
client := &fake.Clientset{}
ctrl, err := newTestController(client, nil, true)
ctrl, err := newTestController(ctx, client, nil, true)
if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
}
@ -815,7 +816,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
}
ctrl.classLister = storagelisters.NewStorageClassLister(indexer)
reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
reactor := newVolumeReactor(ctx, client, ctrl, nil, nil, test.errors)
for _, claim := range test.initialClaims {
ctrl.claims.Add(claim)
}
@ -837,7 +838,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
counter := 0
for {
counter++
klog.V(4).Infof("test %q: iteration %d", test.name, counter)
logger.V(4).Info("Test", "testName", test.name, "iteration", counter)
if counter > 100 {
t.Errorf("Test %q failed: too many iterations", test.name)
@ -847,7 +848,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
// Wait for all goroutines to finish
reactor.waitForIdle()
obj := reactor.PopChange()
obj := reactor.PopChange(ctx)
if obj == nil {
// Nothing was changed, should we exit?
if firstSync || reactor.GetChangeCount() > 0 {
@ -855,7 +856,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
// Simulate "periodic sync" of everything (until it produces
// no changes).
firstSync = false
klog.V(4).Infof("test %q: simulating periodical sync of all claims and volumes", test.name)
logger.V(4).Info("Test simulating periodical sync of all claims and volumes", "testName", test.name)
reactor.SyncAll()
} else {
// Last sync did not produce any updates, the test reached
@ -876,7 +877,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil {
if err == pvtesting.ErrVersionConflict {
// Ignore version errors
klog.V(4).Infof("test intentionally ignores version error.")
logger.V(4).Info("Test intentionally ignores version error")
} else {
t.Errorf("Error calling syncClaim: %v", err)
// Finish the loop on the first error
@ -893,7 +894,7 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
if err != nil {
if err == pvtesting.ErrVersionConflict {
// Ignore version errors
klog.V(4).Infof("test intentionally ignores version error.")
logger.V(4).Info("Test intentionally ignores version error")
} else {
t.Errorf("Error calling syncVolume: %v", err)
// Finish the loop on the first error
@ -904,8 +905,8 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s
continue
}
}
evaluateTestResults(ctrl, reactor.VolumeReactor, test, t)
klog.V(4).Infof("test %q finished after %d iterations", test.name, counter)
evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
logger.V(4).Info("Test finished after iterations", "testName", test.name, "iterations", counter)
}
for _, test := range tests {
@ -985,10 +986,10 @@ func (plugin *mockVolumePlugin) NewUnmounter(name string, podUID types.UID) (vol
// Provisioner interfaces
func (plugin *mockVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *mockVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
if len(plugin.provisionCalls) > 0 {
// mockVolumePlugin directly implements Provisioner interface
klog.V(4).Infof("mock plugin NewProvisioner called, returning mock provisioner")
logger.V(4).Info("Mock plugin NewProvisioner called, returning mock provisioner")
plugin.provisionOptions = options
return plugin, nil
} else {
@ -1000,11 +1001,10 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
if len(plugin.provisionCalls) <= plugin.provisionCallCounter {
return nil, fmt.Errorf("Mock plugin error: unexpected provisioner call %d", plugin.provisionCallCounter)
}
var pv *v1.PersistentVolume
call := plugin.provisionCalls[plugin.provisionCallCounter]
if !reflect.DeepEqual(call.expectedParameters, plugin.provisionOptions.Parameters) {
klog.Errorf("invalid provisioner call, expected options: %+v, got: %+v", call.expectedParameters, plugin.provisionOptions.Parameters)
klog.TODO().Error(nil, "Invalid provisioner call", "gotOptions", plugin.provisionOptions.Parameters, "expectedOptions", call.expectedParameters)
return nil, fmt.Errorf("Mock plugin error: invalid provisioner call")
}
if call.ret == nil {
@ -1033,16 +1033,16 @@ func (plugin *mockVolumePlugin) Provision(selectedNode *v1.Node, allowedTopologi
}
plugin.provisionCallCounter++
klog.V(4).Infof("mock plugin Provision call nr. %d, returning %v: %v", plugin.provisionCallCounter, pv, call.ret)
klog.TODO().V(4).Info("Mock plugin Provision call nr", "provisionCallCounter", plugin.provisionCallCounter, "pv", klog.KObj(pv), "err", call.ret)
return pv, call.ret
}
// Deleter interfaces
func (plugin *mockVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *mockVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
if len(plugin.deleteCalls) > 0 {
// mockVolumePlugin directly implements Deleter interface
klog.V(4).Infof("mock plugin NewDeleter called, returning mock deleter")
logger.V(4).Info("Mock plugin NewDeleter called, returning mock deleter")
return plugin, nil
} else {
return nil, fmt.Errorf("Mock plugin error: no deleteCalls configured")
@ -1055,7 +1055,7 @@ func (plugin *mockVolumePlugin) Delete() error {
}
ret := plugin.deleteCalls[plugin.deleteCallCounter]
plugin.deleteCallCounter++
klog.V(4).Infof("mock plugin Delete call nr. %d, returning %v", plugin.deleteCallCounter, ret)
klog.TODO().V(4).Info("Mock plugin Delete call nr", "deleteCallCounter", plugin.deleteCallCounter, "err", ret)
return ret
}
@ -1081,6 +1081,6 @@ func (plugin *mockVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventR
}
ret := plugin.recycleCalls[plugin.recycleCallCounter]
plugin.recycleCallCounter++
klog.V(4).Infof("mock plugin Recycle call nr. %d, returning %v", plugin.recycleCallCounter, ret)
klog.TODO().V(4).Info("Mock plugin Recycle call nr", "recycleCallCounter", plugin.recycleCallCounter, "err", ret)
return ret
}
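klog.TODO is used above because the mock plugin methods have no logger parameter yet: it behaves like klog.Background but marks the call site as pending conversion to contextual logging. A minimal sketch:

package main

import "k8s.io/klog/v2"

// notYetConverted stands for a function whose callers cannot supply a
// logger yet; klog.TODO keeps it logging while flagging the gap.
func notYetConverted() {
	klog.TODO().V(4).Info("Mock plugin call", "counter", 1)
}

func main() {
	notYetConverted()
	klog.Flush()
}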
View File
@ -17,11 +17,11 @@ limitations under the License.
package persistentvolume
import (
"context"
"errors"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features"
"testing"
@ -173,6 +173,7 @@ var provision2Success = provisionCall{
func TestProvisionSync(t *testing.T) {
// Default enable the HonorPVReclaimPolicy feature gate.
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.HonorPVReclaimPolicy, true)()
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{
{
// Provision a volume (with a default class)
@ -243,7 +244,7 @@ func TestProvisionSync(t *testing.T) {
expectedClaims: newClaimArray("claim11-7", "uid11-7", "1Gi", "", v1.ClaimPending, &classGold, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner),
expectedEvents: noevents,
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Create a volume before provisionClaimOperation starts.
// This simulates a parallel controller provisioning the volume.
volume := newVolume("pvc-uid11-7", "1Gi", "uid11-7", "claim11-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, classGold, volume.AnnBoundByController, volume.AnnDynamicallyProvisioned)
@ -528,7 +529,7 @@ func TestProvisionSync(t *testing.T) {
newClaimArray("claim11-23", "uid11-23", "1Gi", "", v1.ClaimPending, &classCopper, volume.AnnStorageProvisioner, volume.AnnBetaStorageProvisioner)),
[]string{"Normal ProvisioningSucceeded"},
noerrors,
wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{provision1Success}, testSyncClaim),
func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
nodesIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1"}}
@ -578,7 +579,7 @@ func TestProvisionSync(t *testing.T) {
wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim),
},
}
runSyncTests(t, tests, storageClasses, []*v1.Pod{})
runSyncTests(t, ctx, tests, storageClasses, []*v1.Pod{})
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -597,6 +598,7 @@ func TestProvisionSync(t *testing.T) {
//
// Some limit of calls is enforced to prevent endless loops.
func TestProvisionMultiSync(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{
{
// Provision a volume with binding
@ -620,7 +622,7 @@ func TestProvisionMultiSync(t *testing.T) {
newClaimArray("claim12-2", "uid12-2", "1Gi", "pvc-uid12-2", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))),
expectedEvents: []string{"Normal ExternalProvisioning"},
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Create a volume before syncClaim tries to bind a PV to PVC
// This simulates an external provisioner creating a volume while the controller
// is waiting for a volume to bind to the existing claim
@ -659,7 +661,7 @@ func TestProvisionMultiSync(t *testing.T) {
newClaimArray("claim12-4", "uid12-4", "1Gi", "pvc-uid12-4", v1.ClaimBound, &classExternal, volume.AnnBoundByController, volume.AnnBindCompleted))),
expectedEvents: []string{"Normal ExternalProvisioning"},
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithProvisionCalls([]provisionCall{}, testSyncClaim), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Create a volume before syncClaim tries to bind a PV to PVC
// This simulates an external provisioner creating a volume while the controller
// is waiting for a volume to bind to the existing claim
@ -676,16 +678,17 @@ func TestProvisionMultiSync(t *testing.T) {
},
}
runMultisyncTests(t, tests, storageClasses, storageClasses[0].Name)
runMultisyncTests(t, ctx, tests, storageClasses, storageClasses[0].Name)
}
// When provisioning is disabled, provisioning a claim should instantly return nil
func TestDisablingDynamicProvisioner(t *testing.T) {
ctrl, err := newTestController(nil, nil, false)
_, ctx := ktesting.NewTestContext(t)
ctrl, err := newTestController(ctx, nil, nil, false)
if err != nil {
t.Fatalf("Construct PersistentVolume controller failed: %v", err)
}
retVal := ctrl.provisionClaim(context.TODO(), nil)
retVal := ctrl.provisionClaim(ctx, nil)
if retVal != nil {
t.Errorf("Expected nil return but got %v", retVal)
}
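
For reference, this is roughly how ktesting.NewTestContext is meant to be used: it returns a logger that writes through t.Log plus a context carrying that logger, so code under test can recover it with klog.FromContext. A minimal sketch with a hypothetical doWork function:

package example

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// doWork is a hypothetical function under test; it recovers the logger
// that the test attached to the context.
func doWork(ctx context.Context) {
	klog.FromContext(ctx).V(4).Info("Doing work")
}

func TestDoWork(t *testing.T) {
	// NewTestContext returns a logger that writes through t.Log and a
	// context carrying that logger, so output is attributed to this test.
	logger, ctx := ktesting.NewTestContext(t)
	logger.Info("Starting")
	doWork(ctx)
}
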

File diff suppressed because it is too large

View File

@ -79,7 +79,7 @@ type ControllerParameters struct {
}
// NewController creates a new PersistentVolume controller
func NewController(p ControllerParameters) (*PersistentVolumeController, error) {
func NewController(ctx context.Context, p ControllerParameters) (*PersistentVolumeController, error) {
eventRecorder := p.EventRecorder
var eventBroadcaster record.EventBroadcaster
if eventRecorder == nil {
@ -112,9 +112,9 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
p.VolumeInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.volumeQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.volumeQueue, obj) },
AddFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.volumeQueue, obj) },
},
)
controller.volumeLister = p.VolumeInformer.Lister()
@ -122,9 +122,9 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
p.ClaimInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(controller.claimQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
AddFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
UpdateFunc: func(oldObj, newObj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, newObj) },
DeleteFunc: func(obj interface{}) { controller.enqueueWork(ctx, controller.claimQueue, obj) },
},
)
controller.claimLister = p.ClaimInformer.Lister()
@ -156,53 +156,54 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)
// initializeCaches fills all controller caches with initial data from etcd in
// order to have the caches already filled when the first addClaim/addVolume
// events arrive to perform the initial synchronization of the controller.
func (ctrl *PersistentVolumeController) initializeCaches(volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
func (ctrl *PersistentVolumeController) initializeCaches(logger klog.Logger, volumeLister corelisters.PersistentVolumeLister, claimLister corelisters.PersistentVolumeClaimLister) {
volumeList, err := volumeLister.List(labels.Everything())
if err != nil {
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
logger.Error(err, "PersistentVolumeController can't initialize caches")
return
}
for _, volume := range volumeList {
volumeClone := volume.DeepCopy()
if _, err = ctrl.storeVolumeUpdate(volumeClone); err != nil {
klog.Errorf("error updating volume cache: %v", err)
if _, err = ctrl.storeVolumeUpdate(logger, volumeClone); err != nil {
logger.Error(err, "Error updating volume cache")
}
}
claimList, err := claimLister.List(labels.Everything())
if err != nil {
klog.Errorf("PersistentVolumeController can't initialize caches: %v", err)
logger.Error(err, "PersistentVolumeController can't initialize caches")
return
}
for _, claim := range claimList {
if _, err = ctrl.storeClaimUpdate(claim.DeepCopy()); err != nil {
klog.Errorf("error updating claim cache: %v", err)
if _, err = ctrl.storeClaimUpdate(logger, claim.DeepCopy()); err != nil {
logger.Error(err, "Error updating claim cache")
}
}
klog.V(4).Infof("controller initialized")
logger.V(4).Info("Controller initialized")
}
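
The logger parameter added here is typically produced at the call site with klog.LoggerWithName and handed down either directly or via the context. A minimal sketch of that wiring, with a placeholder controller name:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// initialize stands in for a callee such as initializeCaches.
func initialize(ctx context.Context) {
	// FromContext falls back to the global logger if none was attached.
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Controller initialized")
}

func main() {
	ctx := context.Background()
	// "example-controller" is a placeholder name; LoggerWithName prefixes
	// every message, and NewContext stores the logger in the context so
	// deeper layers need no extra logger parameter.
	logger := klog.LoggerWithName(klog.FromContext(ctx), "example-controller")
	initialize(klog.NewContext(ctx, logger))
}
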
// enqueueWork adds volume or claim to given work queue.
func (ctrl *PersistentVolumeController) enqueueWork(queue workqueue.Interface, obj interface{}) {
func (ctrl *PersistentVolumeController) enqueueWork(ctx context.Context, queue workqueue.Interface, obj interface{}) {
// Beware of "xxx deleted" events
logger := klog.FromContext(ctx)
if unknown, ok := obj.(cache.DeletedFinalStateUnknown); ok && unknown.Obj != nil {
obj = unknown.Obj
}
objName, err := controller.KeyFunc(obj)
if err != nil {
klog.Errorf("failed to get key from object: %v", err)
logger.Error(err, "Failed to get key from object")
return
}
klog.V(5).Infof("enqueued %q for sync", objName)
logger.V(5).Info("Enqueued for sync", "objName", objName)
queue.Add(objName)
}
func (ctrl *PersistentVolumeController) storeVolumeUpdate(volume interface{}) (bool, error) {
return storeObjectUpdate(ctrl.volumes.store, volume, "volume")
func (ctrl *PersistentVolumeController) storeVolumeUpdate(logger klog.Logger, volume interface{}) (bool, error) {
return storeObjectUpdate(logger, ctrl.volumes.store, volume, "volume")
}
func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (bool, error) {
return storeObjectUpdate(ctrl.claims, claim, "claim")
func (ctrl *PersistentVolumeController) storeClaimUpdate(logger klog.Logger, claim interface{}) (bool, error) {
return storeObjectUpdate(logger, ctrl.claims, claim, "claim")
}
// updateVolume runs in worker thread and handles "volume added",
@ -210,9 +211,10 @@ func (ctrl *PersistentVolumeController) storeClaimUpdate(claim interface{}) (boo
func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume *v1.PersistentVolume) {
// Store the new volume version in the cache and do not process it if this
// is an old version.
new, err := ctrl.storeVolumeUpdate(volume)
logger := klog.FromContext(ctx)
new, err := ctrl.storeVolumeUpdate(logger, volume)
if err != nil {
klog.Errorf("%v", err)
logger.Error(err, "")
}
if !new {
return
@ -223,19 +225,20 @@ func (ctrl *PersistentVolumeController) updateVolume(ctx context.Context, volume
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
klog.V(3).Infof("could not sync volume %q: %+v", volume.Name, err)
logger.V(3).Info("Could not sync volume", "volumeName", volume.Name, "err", err)
} else {
klog.Errorf("could not sync volume %q: %+v", volume.Name, err)
logger.Error(err, "Could not sync volume", "volumeName", volume.Name, "err", err)
}
}
}
// deleteVolume runs in worker thread and handles "volume deleted" event.
func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume) {
func (ctrl *PersistentVolumeController) deleteVolume(ctx context.Context, volume *v1.PersistentVolume) {
logger := klog.FromContext(ctx)
if err := ctrl.volumes.store.Delete(volume); err != nil {
klog.Errorf("volume %q deletion encountered : %v", volume.Name, err)
logger.Error(err, "Volume deletion encountered", "volumeName", volume.Name)
} else {
klog.V(4).Infof("volume %q deleted", volume.Name)
logger.V(4).Info("volume deleted", "volumeName", volume.Name)
}
// record deletion metric if a deletion start timestamp is in the cache;
// the following calls will be a no-op if there is nothing for this volume in the cache
@ -249,7 +252,7 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
// claim here in response to volume deletion prevents the claim from
// waiting until the next sync period for its Lost status.
claimKey := claimrefToClaimKey(volume.Spec.ClaimRef)
klog.V(5).Infof("deleteVolume[%s]: scheduling sync of claim %q", volume.Name, claimKey)
logger.V(5).Info("deleteVolume: scheduling sync of claim", "PVC", klog.KRef(volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name), "volumeName", volume.Name)
ctrl.claimQueue.Add(claimKey)
}
@ -258,9 +261,10 @@ func (ctrl *PersistentVolumeController) deleteVolume(volume *v1.PersistentVolume
func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
// Store the new claim version in the cache and do not process it if this is
// an old version.
new, err := ctrl.storeClaimUpdate(claim)
logger := klog.FromContext(ctx)
new, err := ctrl.storeClaimUpdate(logger, claim)
if err != nil {
klog.Errorf("%v", err)
logger.Error(err, "")
}
if !new {
return
@ -270,35 +274,36 @@ func (ctrl *PersistentVolumeController) updateClaim(ctx context.Context, claim *
if errors.IsConflict(err) {
// Version conflict error happens quite often and the controller
// recovers from it easily.
klog.V(3).Infof("could not sync claim %q: %+v", claimToClaimKey(claim), err)
logger.V(3).Info("Could not sync claim", "PVC", klog.KObj(claim), "err", err)
} else {
klog.Errorf("could not sync volume %q: %+v", claimToClaimKey(claim), err)
logger.Error(err, "Could not sync volume", "PVC", klog.KObj(claim))
}
}
}
// Unit test [5-5] [5-6] [5-7]
// deleteClaim runs in worker thread and handles "claim deleted" event.
func (ctrl *PersistentVolumeController) deleteClaim(claim *v1.PersistentVolumeClaim) {
func (ctrl *PersistentVolumeController) deleteClaim(ctx context.Context, claim *v1.PersistentVolumeClaim) {
logger := klog.FromContext(ctx)
if err := ctrl.claims.Delete(claim); err != nil {
klog.Errorf("claim %q deletion encountered : %v", claim.Name, err)
logger.Error(err, "Claim deletion encountered", "PVC", klog.KObj(claim))
}
claimKey := claimToClaimKey(claim)
klog.V(4).Infof("claim %q deleted", claimKey)
logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim))
// clean any possible unfinished provision start timestamp from cache
// Unit test [5-8] [5-9]
ctrl.operationTimestamps.Delete(claimKey)
volumeName := claim.Spec.VolumeName
if volumeName == "" {
klog.V(5).Infof("deleteClaim[%q]: volume not bound", claimKey)
logger.V(5).Info("deleteClaim: volume not bound", "PVC", klog.KObj(claim))
return
}
// sync the volume when its claim is deleted. Explicitly sync'ing the
// volume here in response to claim deletion prevents the volume from
// waiting until the next sync period for its Release.
klog.V(5).Infof("deleteClaim[%q]: scheduling sync of volume %s", claimKey, volumeName)
logger.V(5).Info("deleteClaim: scheduling sync of volume", "PVC", klog.KObj(claim), "volumeName", volumeName)
ctrl.volumeQueue.Add(volumeName)
}
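
klog.KObj and klog.KRef, used throughout these hunks, emit a namespace/name reference instead of a hand-formatted string. A small sketch, assuming an inline claim object:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "claim-1"},
	}
	logger := klog.Background()
	// KObj takes the object itself, KRef a namespace/name pair; both render
	// as "default/claim-1" (and as structured fields on JSON backends).
	logger.V(4).Info("Claim deleted", "PVC", klog.KObj(claim))
	logger.V(5).Info("Scheduling sync of volume", "PVC", klog.KRef("default", "claim-1"), "volumeName", "pv-1")
}
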
@ -314,17 +319,17 @@ func (ctrl *PersistentVolumeController) Run(ctx context.Context) {
ctrl.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: ctrl.kubeClient.CoreV1().Events("")})
defer ctrl.eventBroadcaster.Shutdown()
}
klog.Infof("Starting persistent volume controller")
defer klog.Infof("Shutting down persistent volume controller")
logger := klog.FromContext(ctx)
logger.Info("Starting persistent volume controller")
defer logger.Info("Shutting down persistent volume controller")
if !cache.WaitForNamedCacheSync("persistent volume", ctx.Done(), ctrl.volumeListerSynced, ctrl.claimListerSynced, ctrl.classListerSynced, ctrl.podListerSynced, ctrl.NodeListerSynced) {
return
}
ctrl.initializeCaches(ctrl.volumeLister, ctrl.claimLister)
ctrl.initializeCaches(logger, ctrl.volumeLister, ctrl.claimLister)
go wait.Until(ctrl.resync, ctrl.resyncPeriod, ctx.Done())
go wait.Until(func() { ctrl.resync(ctx) }, ctrl.resyncPeriod, ctx.Done())
go wait.UntilWithContext(ctx, ctrl.volumeWorker, time.Second)
go wait.UntilWithContext(ctx, ctrl.claimWorker, time.Second)
@ -342,7 +347,8 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont
// when no modifications are required this function could sometimes return a
// copy of the volume and sometimes return a ref to the original
claimClone := claim.DeepCopy()
modified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
logger := klog.FromContext(ctx)
modified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
if !modified {
return claimClone, nil
}
@ -350,7 +356,7 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
}
_, err = ctrl.storeClaimUpdate(newClaim)
_, err = ctrl.storeClaimUpdate(logger, newClaim)
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations: %v", err)
}
@ -360,8 +366,9 @@ func (ctrl *PersistentVolumeController) updateClaimMigrationAnnotations(ctx cont
func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinalizers(ctx context.Context,
volume *v1.PersistentVolume) (*v1.PersistentVolume, error) {
volumeClone := volume.DeepCopy()
annModified := updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false)
modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(ctrl.csiMigratedPluginManager, volumeClone)
logger := klog.FromContext(ctx)
annModified := updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, volumeClone.Annotations, false)
modifiedFinalizers, finalizersModified := modifyDeletionFinalizers(logger, ctrl.csiMigratedPluginManager, volumeClone)
if !annModified && !finalizersModified {
return volumeClone, nil
}
@ -372,7 +379,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinal
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err)
}
_, err = ctrl.storeVolumeUpdate(newVol)
_, err = ctrl.storeVolumeUpdate(logger, newVol)
if err != nil {
return nil, fmt.Errorf("persistent Volume Controller can't anneal migration annotations or finalizer: %v", err)
}
@ -385,7 +392,7 @@ func (ctrl *PersistentVolumeController) updateVolumeMigrationAnnotationsAndFinal
// `Recycle`, removing the finalizer is necessary to reflect the reclaimPolicy updates on the PV.
// The method also removes any external PV Deletion Protection finalizers added on the PV, this represents CSI migration
// rollback/disable scenarios.
func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) {
func modifyDeletionFinalizers(logger klog.Logger, cmpm CSIMigratedPluginManager, volume *v1.PersistentVolume) ([]string, bool) {
modified := false
var outFinalizers []string
if !utilfeature.DefaultFeatureGate.Enabled(features.HonorPVReclaimPolicy) {
@ -416,18 +423,18 @@ func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.Persiste
reclaimPolicy := volume.Spec.PersistentVolumeReclaimPolicy
// Add back the in-tree PV deletion protection finalizer if it does not already exist
if reclaimPolicy == v1.PersistentVolumeReclaimDelete && !slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
klog.V(4).Infof("Adding in-tree pv deletion protection finalizer on %s", volume.Name)
logger.V(4).Info("Adding in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
outFinalizers = append(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer)
modified = true
} else if (reclaimPolicy == v1.PersistentVolumeReclaimRetain || reclaimPolicy == v1.PersistentVolumeReclaimRecycle) && slice.ContainsString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil) {
// Remove the in-tree PV deletion protection finalizer if the reclaim policy is 'Retain' or 'Recycle'
klog.V(4).Infof("Removing in-tree pv deletion protection finalizer on %s", volume.Name)
logger.V(4).Info("Removing in-tree pv deletion protection finalizer on volume", "volumeName", volume.Name)
outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionInTreeProtectionFinalizer, nil)
modified = true
}
// Remove the external PV deletion protection finalizer
if slice.ContainsString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil) {
klog.V(4).Infof("Removing external pv deletion protection finalizer on %s", volume.Name)
logger.V(4).Info("Removing external pv deletion protection finalizer on volume", "volumeName", volume.Name)
outFinalizers = slice.RemoveString(outFinalizers, storagehelpers.PVDeletionProtectionFinalizer, nil)
modified = true
}
@ -440,7 +447,7 @@ func modifyDeletionFinalizers(cmpm CSIMigratedPluginManager, volume *v1.Persiste
// driver name for that provisioner is "on" based on feature flags, it will also
// remove the annotation if migration is "off" for that provisioner in rollback
// scenarios. Returns true if the annotations map was modified and false otherwise.
func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool {
func updateMigrationAnnotations(logger klog.Logger, cmpm CSIMigratedPluginManager, translator CSINameTranslator, ann map[string]string, claim bool) bool {
var csiDriverName string
var err error
@ -473,7 +480,7 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam
if cmpm.IsMigrationEnabledForPlugin(provisioner) {
csiDriverName, err = translator.GetCSINameFromInTreeName(provisioner)
if err != nil {
klog.Errorf("Could not update volume migration annotations. Migration enabled for plugin %s but could not find corresponding driver name: %v", provisioner, err)
logger.Error(err, "Could not update volume migration annotations. Migration enabled for plugin but could not find corresponding driver name", "plugin", provisioner)
return false
}
if migratedToDriver != csiDriverName {
@ -493,6 +500,7 @@ func updateMigrationAnnotations(cmpm CSIMigratedPluginManager, translator CSINam
// volumeWorker processes items from volumeQueue. It must run only once,
// syncVolume is not assured to be reentrant.
func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
logger := klog.FromContext(ctx)
workFunc := func(ctx context.Context) bool {
keyObj, quit := ctrl.volumeQueue.Get()
if quit {
@ -500,11 +508,11 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
}
defer ctrl.volumeQueue.Done(keyObj)
key := keyObj.(string)
klog.V(5).Infof("volumeWorker[%s]", key)
logger.V(5).Info("volumeWorker", "volumeKey", key)
_, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.V(4).Infof("error getting name of volume %q to get volume from informer: %v", key, err)
logger.V(4).Info("Error getting name of volume to get volume from informer", "volumeKey", key, "err", err)
return false
}
volume, err := ctrl.volumeLister.Get(name)
@ -515,7 +523,7 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
return false
}
if !errors.IsNotFound(err) {
klog.V(2).Infof("error getting volume %q from informer: %v", key, err)
logger.V(2).Info("Error getting volume from informer", "volumeKey", key, "err", err)
return false
}
@ -523,26 +531,26 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
// "delete"
volumeObj, found, err := ctrl.volumes.store.GetByKey(key)
if err != nil {
klog.V(2).Infof("error getting volume %q from cache: %v", key, err)
logger.V(2).Info("Error getting volume from cache", "volumeKey", key, "err", err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the volume from its cache
klog.V(2).Infof("deletion of volume %q was already processed", key)
logger.V(2).Info("Deletion of volume was already processed", "volumeKey", key)
return false
}
volume, ok := volumeObj.(*v1.PersistentVolume)
if !ok {
klog.Errorf("expected volume, got %+v", volumeObj)
logger.Error(nil, "Expected volume, got", "obj", volumeObj)
return false
}
ctrl.deleteVolume(volume)
ctrl.deleteVolume(ctx, volume)
return false
}
for {
if quit := workFunc(ctx); quit {
klog.Infof("volume worker queue shutting down")
logger.Info("Volume worker queue shutting down")
return
}
}
@ -551,6 +559,7 @@ func (ctrl *PersistentVolumeController) volumeWorker(ctx context.Context) {
// claimWorker processes items from claimQueue. It must run only once,
// syncClaim is not reentrant.
func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
logger := klog.FromContext(ctx)
workFunc := func() bool {
keyObj, quit := ctrl.claimQueue.Get()
if quit {
@ -558,11 +567,11 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
}
defer ctrl.claimQueue.Done(keyObj)
key := keyObj.(string)
klog.V(5).Infof("claimWorker[%s]", key)
logger.V(5).Info("claimWorker", "claimKey", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
klog.V(4).Infof("error getting namespace & name of claim %q to get claim from informer: %v", key, err)
logger.V(4).Info("Error getting namespace & name of claim to get claim from informer", "claimKey", key, "err", err)
return false
}
claim, err := ctrl.claimLister.PersistentVolumeClaims(namespace).Get(name)
@ -573,33 +582,33 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
return false
}
if !errors.IsNotFound(err) {
klog.V(2).Infof("error getting claim %q from informer: %v", key, err)
logger.V(2).Info("Error getting claim from informer", "claimKey", key, "err", err)
return false
}
// The claim is not in informer cache, the event must have been "delete"
claimObj, found, err := ctrl.claims.GetByKey(key)
if err != nil {
klog.V(2).Infof("error getting claim %q from cache: %v", key, err)
logger.V(2).Info("Error getting claim from cache", "claimKey", key, "err", err)
return false
}
if !found {
// The controller has already processed the delete event and
// deleted the claim from its cache
klog.V(2).Infof("deletion of claim %q was already processed", key)
logger.V(2).Info("Deletion of claim was already processed", "claimKey", key)
return false
}
claim, ok := claimObj.(*v1.PersistentVolumeClaim)
if !ok {
klog.Errorf("expected claim, got %+v", claimObj)
logger.Error(nil, "Expected claim, got", "obj", claimObj)
return false
}
ctrl.deleteClaim(claim)
ctrl.deleteClaim(ctx, claim)
return false
}
for {
if quit := workFunc(); quit {
klog.Infof("claim worker queue shutting down")
logger.Info("Claim worker queue shutting down")
return
}
}
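
Both workers follow the same workqueue loop: Get blocks until an item arrives or the queue shuts down, and Done releases the key so it can be handed out again. A generic sketch of that loop with a hypothetical process callback, not the controller's actual workFunc:

package example

import (
	"context"

	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
)

// worker drains a queue until shutdown; process stands in for the sync logic.
func worker(ctx context.Context, queue workqueue.Interface, process func(key string)) {
	logger := klog.FromContext(ctx)
	for {
		keyObj, quit := queue.Get()
		if quit {
			logger.Info("Worker queue shutting down")
			return
		}
		process(keyObj.(string))
		// Done must be called so the queue can hand the key out again.
		queue.Done(keyObj)
	}
}
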
@ -608,25 +617,26 @@ func (ctrl *PersistentVolumeController) claimWorker(ctx context.Context) {
// resync supplements short resync period of shared informers - we don't want
// all consumers of PV/PVC shared informer to have a short resync period,
// therefore we do our own.
func (ctrl *PersistentVolumeController) resync() {
klog.V(4).Infof("resyncing PV controller")
func (ctrl *PersistentVolumeController) resync(ctx context.Context) {
logger := klog.FromContext(ctx)
logger.V(4).Info("Resyncing PV controller")
pvcs, err := ctrl.claimLister.List(labels.NewSelector())
if err != nil {
klog.Warningf("cannot list claims: %s", err)
logger.Info("Cannot list claims", "err", err)
return
}
for _, pvc := range pvcs {
ctrl.enqueueWork(ctrl.claimQueue, pvc)
ctrl.enqueueWork(ctx, ctrl.claimQueue, pvc)
}
pvs, err := ctrl.volumeLister.List(labels.NewSelector())
if err != nil {
klog.Warningf("cannot list persistent volumes: %s", err)
logger.Info("Cannot list persistent volumes", "err", err)
return
}
for _, pv := range pvs {
ctrl.enqueueWork(ctrl.volumeQueue, pv)
ctrl.enqueueWork(ctx, ctrl.volumeQueue, pv)
}
}
@ -642,14 +652,15 @@ func (ctrl *PersistentVolumeController) setClaimProvisioner(ctx context.Context,
// modify these, therefore create a copy.
claimClone := claim.DeepCopy()
// TODO: remove the beta storage provisioner anno after the deprecation period
logger := klog.FromContext(ctx)
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnBetaStorageProvisioner, provisionerName)
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, storagehelpers.AnnStorageProvisioner, provisionerName)
updateMigrationAnnotations(ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(context.TODO(), claimClone, metav1.UpdateOptions{})
updateMigrationAnnotations(logger, ctrl.csiMigratedPluginManager, ctrl.translator, claimClone.Annotations, true)
newClaim, err := ctrl.kubeClient.CoreV1().PersistentVolumeClaims(claim.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
if err != nil {
return newClaim, err
}
_, err = ctrl.storeClaimUpdate(newClaim)
_, err = ctrl.storeClaimUpdate(logger, newClaim)
if err != nil {
return newClaim, err
}
@ -678,7 +689,7 @@ func getVolumeStatusForLogging(volume *v1.PersistentVolume) string {
// callback (i.e. with events from etcd) or with an object modified by the
// controller itself. Returns "true", if the cache was updated, false if the
// object is an old version and should be ignored.
func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bool, error) {
func storeObjectUpdate(logger klog.Logger, store cache.Store, obj interface{}, className string) (bool, error) {
objName, err := controller.KeyFunc(obj)
if err != nil {
return false, fmt.Errorf("couldn't get key for object %+v: %w", obj, err)
@ -692,10 +703,9 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
if err != nil {
return false, err
}
if !found {
// This is a new object
klog.V(4).Infof("storeObjectUpdate: adding %s %q, version %s", className, objName, objAccessor.GetResourceVersion())
logger.V(4).Info("storeObjectUpdate, adding obj", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
if err = store.Add(obj); err != nil {
return false, fmt.Errorf("error adding %s %q to controller cache: %w", className, objName, err)
}
@ -719,11 +729,11 @@ func storeObjectUpdate(store cache.Store, obj interface{}, className string) (bo
// Throw away only older version, let the same version pass - we do want to
// get periodic sync events.
if oldObjResourceVersion > objResourceVersion {
klog.V(4).Infof("storeObjectUpdate: ignoring %s %q version %s", className, objName, objAccessor.GetResourceVersion())
logger.V(4).Info("storeObjectUpdate: ignoring obj", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
return false, nil
}
klog.V(4).Infof("storeObjectUpdate updating %s %q with version %s", className, objName, objAccessor.GetResourceVersion())
logger.V(4).Info("storeObjectUpdate updating obj with version", "storageClassName", className, "objName", objName, "resourceVersion", objAccessor.GetResourceVersion())
if err = store.Update(obj); err != nil {
return false, fmt.Errorf("error updating %s %q in controller cache: %w", className, objName, err)
}

View File

@ -38,6 +38,7 @@ import (
"k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
"k8s.io/kubernetes/pkg/features"
@ -310,7 +311,7 @@ func TestControllerSync(t *testing.T) {
},
},
}
_, ctx := ktesting.NewTestContext(t)
doit := func(test controllerTest) {
// Initialize the controller
client := &fake.Clientset{}
@ -324,7 +325,7 @@ func TestControllerSync(t *testing.T) {
client.PrependWatchReactor("pods", core.DefaultWatchReactor(watch.NewFake(), nil))
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
ctrl, err := newTestController(client, informers, true)
ctrl, err := newTestController(ctx, client, informers, true)
if err != nil {
t.Fatalf("Test %q construct persistent volume failed: %v", test.name, err)
}
@ -341,7 +342,7 @@ func TestControllerSync(t *testing.T) {
}
ctrl.classLister = storagelisters.NewStorageClassLister(indexer)
reactor := newVolumeReactor(client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors)
reactor := newVolumeReactor(ctx, client, ctrl, fakeVolumeWatch, fakeClaimWatch, test.errors)
for _, claim := range test.initialClaims {
claim = claim.DeepCopy()
reactor.AddClaim(claim)
@ -380,7 +381,7 @@ func TestControllerSync(t *testing.T) {
}
// Simulate a periodic resync, just in case some events arrived in a
// wrong order.
ctrl.resync()
ctrl.resync(ctx)
err = reactor.waitTest(test)
if err != nil {
@ -388,7 +389,7 @@ func TestControllerSync(t *testing.T) {
}
cancel()
evaluateTestResults(ctrl, reactor.VolumeReactor, test, t)
evaluateTestResults(ctx, ctrl, reactor.VolumeReactor, test, t)
}
for _, test := range tests {
@ -402,7 +403,8 @@ func TestControllerSync(t *testing.T) {
func storeVersion(t *testing.T, prefix string, c cache.Store, version string, expectedReturn bool) {
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = version
ret, err := storeObjectUpdate(c, pv, "volume")
logger, _ := ktesting.NewTestContext(t)
ret, err := storeObjectUpdate(logger, c, pv, "volume")
if err != nil {
t.Errorf("%s: expected storeObjectUpdate to succeed, got: %v", prefix, err)
}
@ -461,7 +463,8 @@ func TestControllerCacheParsingError(t *testing.T) {
pv := newVolume("pvName", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimDelete, classEmpty)
pv.ResourceVersion = "xxx"
_, err := storeObjectUpdate(c, pv, "volume")
logger, _ := ktesting.NewTestContext(t)
_, err := storeObjectUpdate(logger, c, pv, "volume")
if err == nil {
t.Errorf("Expected parsing error, got nil instead")
}
@ -572,19 +575,19 @@ func TestAnnealMigrationAnnotations(t *testing.T) {
translator := csitrans.New()
cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate)
logger, _ := ktesting.NewTestContext(t)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if tc.volumeAnnotations != nil {
ann := tc.volumeAnnotations
updateMigrationAnnotations(cmpm, translator, ann, false)
updateMigrationAnnotations(logger, cmpm, translator, ann, false)
if !reflect.DeepEqual(tc.expVolumeAnnotations, ann) {
t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations)
}
}
if tc.claimAnnotations != nil {
ann := tc.claimAnnotations
updateMigrationAnnotations(cmpm, translator, ann, true)
updateMigrationAnnotations(logger, cmpm, translator, ann, true)
if !reflect.DeepEqual(tc.expClaimAnnotations, ann) {
t.Errorf("got volume annoations: %v, but expected: %v", ann, tc.expVolumeAnnotations)
}
@ -732,13 +735,13 @@ func TestModifyDeletionFinalizers(t *testing.T) {
translator := csitrans.New()
cmpm := csimigration.NewPluginManager(translator, utilfeature.DefaultFeatureGate)
logger, _ := ktesting.NewTestContext(t)
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
if tc.volumeAnnotations != nil {
tc.initialVolume.SetAnnotations(tc.volumeAnnotations)
}
modifiedFinalizers, modified := modifyDeletionFinalizers(cmpm, tc.initialVolume)
modifiedFinalizers, modified := modifyDeletionFinalizers(logger, cmpm, tc.initialVolume)
if modified != tc.expModified {
t.Errorf("got modified: %v, but expected: %v", modified, tc.expModified)
}
@ -881,7 +884,8 @@ func TestRetroactiveStorageClassAssignment(t *testing.T) {
},
},
}
_, ctx := ktesting.NewTestContext(t)
for _, test := range tests {
runSyncTests(t, test.tests, test.storageClasses, nil)
runSyncTests(t, ctx, test.tests, test.storageClasses, nil)
}
}
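
These tests work because the migrated helpers take klog.Logger as an explicit argument, so a table-driven test can simply pass in a ktesting logger. A sketch with a hypothetical annotate helper, not the real updateMigrationAnnotations:

package example

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// annotate is a hypothetical helper in the spirit of
// updateMigrationAnnotations: pure input/output plus an injected logger.
func annotate(logger klog.Logger, ann map[string]string) bool {
	if _, ok := ann["example.io/migrated-to"]; ok {
		return false
	}
	logger.V(4).Info("Adding migration annotation")
	ann["example.io/migrated-to"] = "example.csi.io"
	return true
}

func TestAnnotate(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)
	ann := map[string]string{}
	if !annotate(logger, ann) {
		t.Error("expected modification on first call")
	}
	if annotate(logger, ann) {
		t.Error("expected no modification on second call")
	}
}
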

View File

@ -24,6 +24,7 @@ import (
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2/ktesting"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
)
@ -32,6 +33,7 @@ import (
// 2. Call the syncVolume *once*.
// 3. Compare resulting volumes with expected volumes.
func TestRecycleSync(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
runningPod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "runningPod",
@ -139,7 +141,7 @@ func TestRecycleSync(t *testing.T) {
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Delete the volume before recycle operation starts
reactor.DeleteVolume("volume6-6")
}),
@ -155,7 +157,7 @@ func TestRecycleSync(t *testing.T) {
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.MarkVolumeAvailable("volume6-7")
}),
@ -172,7 +174,7 @@ func TestRecycleSync(t *testing.T) {
expectedClaims: noclaims,
expectedEvents: noevents,
errors: noerrors,
test: wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
test: wrapTestWithInjectedOperation(ctx, wrapTestWithReclaimCalls(operationRecycle, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *pvtesting.VolumeReactor) {
// Mark the volume as Available before the recycler starts
reactor.MarkVolumeAvailable("volume6-8")
}),
@ -249,7 +251,7 @@ func TestRecycleSync(t *testing.T) {
test: wrapTestWithReclaimCalls(operationRecycle, []error{nil}, testSyncVolume),
},
}
runSyncTests(t, tests, []*storage.StorageClass{}, pods)
runSyncTests(t, ctx, tests, []*storage.StorageClass{}, pods)
}
// Test multiple calls to syncClaim/syncVolume and periodic sync of all
@ -268,6 +270,7 @@ func TestRecycleSync(t *testing.T) {
//
// Some limit of calls is enforced to prevent endless loops.
func TestRecycleMultiSync(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
tests := []controllerTest{
{
// recycle failure - recycle returns error. The controller should
@ -282,5 +285,5 @@ func TestRecycleMultiSync(t *testing.T) {
},
}
runMultisyncTests(t, tests, []*storage.StorageClass{}, "")
runMultisyncTests(t, ctx, tests, []*storage.StorageClass{}, "")
}

View File

@ -17,8 +17,10 @@ limitations under the License.
package testing
import (
"context"
"errors"
"fmt"
"k8s.io/klog/v2"
"reflect"
"strconv"
"sync"
@ -32,7 +34,6 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/klog/v2"
)
// ErrVersionConflict is the error returned when resource version of requested
@ -87,14 +88,14 @@ type ReactorError struct {
// to evaluate test results.
// All updated objects are also inserted into changedObjects queue and
// optionally sent back to the controller via its watchers.
func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Object, err error) {
func (r *VolumeReactor) React(ctx context.Context, action core.Action) (handled bool, ret runtime.Object, err error) {
r.lock.Lock()
defer r.lock.Unlock()
klog.V(4).Infof("reactor got operation %q on %q", action.GetVerb(), action.GetResource())
logger := klog.FromContext(ctx)
logger.V(4).Info("Reactor got operation", "resource", action.GetResource(), "verb", action.GetVerb())
// Inject error when requested
err = r.injectReactError(action)
err = r.injectReactError(ctx, action)
if err != nil {
return true, nil, err
}
@ -124,7 +125,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
}
r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++
klog.V(4).Infof("created volume %s", volume.Name)
logger.V(4).Info("Created volume", "volumeName", volume.Name)
return true, volume, nil
case action.Matches("create", "persistentvolumeclaims"):
@ -144,7 +145,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
}
r.changedObjects = append(r.changedObjects, claim)
r.changedSinceLastSync++
klog.V(4).Infof("created claim %s", claim.Name)
logger.V(4).Info("Created claim", "PVC", klog.KObj(claim))
return true, claim, nil
case action.Matches("update", "persistentvolumes"):
@ -160,7 +161,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, obj, ErrVersionConflict
}
if reflect.DeepEqual(storedVolume, volume) {
klog.V(4).Infof("nothing updated volume %s", volume.Name)
logger.V(4).Info("Nothing updated volume", "volumeName", volume.Name)
return true, volume, nil
}
// Don't modify the existing object
@ -177,7 +178,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.volumes[volume.Name] = volume
r.changedObjects = append(r.changedObjects, volume)
r.changedSinceLastSync++
klog.V(4).Infof("saved updated volume %s", volume.Name)
logger.V(4).Info("Saved updated volume", "volumeName", volume.Name)
return true, volume, nil
case action.Matches("update", "persistentvolumeclaims"):
@ -193,7 +194,7 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
return true, obj, ErrVersionConflict
}
if reflect.DeepEqual(storedClaim, claim) {
klog.V(4).Infof("nothing updated claim %s", claim.Name)
logger.V(4).Info("Nothing updated claim", "PVC", klog.KObj(claim))
return true, claim, nil
}
// Don't modify the existing object
@ -210,32 +211,33 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
r.claims[claim.Name] = claim
r.changedObjects = append(r.changedObjects, claim)
r.changedSinceLastSync++
klog.V(4).Infof("saved updated claim %s", claim.Name)
logger.V(4).Info("Saved updated claim", "PVC", klog.KObj(claim))
return true, claim, nil
case action.Matches("get", "persistentvolumes"):
name := action.(core.GetAction).GetName()
volume, found := r.volumes[name]
if found {
klog.V(4).Infof("GetVolume: found %s", volume.Name)
logger.V(4).Info("GetVolume: found volume", "volumeName", volume.Name)
return true, volume.DeepCopy(), nil
}
klog.V(4).Infof("GetVolume: volume %s not found", name)
logger.V(4).Info("GetVolume: volume not found", "volumeName", name)
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
case action.Matches("get", "persistentvolumeclaims"):
name := action.(core.GetAction).GetName()
nameSpace := action.(core.GetAction).GetNamespace()
claim, found := r.claims[name]
if found {
klog.V(4).Infof("GetClaim: found %s", claim.Name)
logger.V(4).Info("GetClaim: found claim", "PVC", klog.KObj(claim))
return true, claim.DeepCopy(), nil
}
klog.V(4).Infof("GetClaim: claim %s not found", name)
logger.V(4).Info("GetClaim: claim not found", "PVC", klog.KRef(nameSpace, name))
return true, nil, apierrors.NewNotFound(action.GetResource().GroupResource(), name)
case action.Matches("delete", "persistentvolumes"):
name := action.(core.DeleteAction).GetName()
klog.V(4).Infof("deleted volume %s", name)
logger.V(4).Info("Deleted volume", "volumeName", name)
obj, found := r.volumes[name]
if found {
delete(r.volumes, name)
@ -249,7 +251,8 @@ func (r *VolumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
case action.Matches("delete", "persistentvolumeclaims"):
name := action.(core.DeleteAction).GetName()
klog.V(4).Infof("deleted claim %s", name)
nameSpace := action.(core.DeleteAction).GetNamespace()
logger.V(4).Info("Deleted claim", "PVC", klog.KRef(nameSpace, name))
obj, found := r.claims[name]
if found {
delete(r.claims, name)
@ -297,18 +300,18 @@ func (r *VolumeReactor) getWatches(gvr schema.GroupVersionResource, ns string) [
// injectReactError returns an error when the test requested the given action to
// fail. nil is returned otherwise.
func (r *VolumeReactor) injectReactError(action core.Action) error {
func (r *VolumeReactor) injectReactError(ctx context.Context, action core.Action) error {
if len(r.errors) == 0 {
// No more errors to inject, everything should succeed.
return nil
}
logger := klog.FromContext(ctx)
for i, expected := range r.errors {
klog.V(4).Infof("trying to match %q %q with %q %q", expected.Verb, expected.Resource, action.GetVerb(), action.GetResource())
logger.V(4).Info("Trying to match resource verb", "resource", action.GetResource(), "verb", action.GetVerb(), "expectedResource", expected.Resource, "expectedVerb", expected.Verb)
if action.Matches(expected.Verb, expected.Resource) {
// That's the action we're waiting for, remove it from injectedErrors
r.errors = append(r.errors[:i], r.errors[i+1:]...)
klog.V(4).Infof("reactor found matching error at index %d: %q %q, returning %v", i, expected.Verb, expected.Resource, expected.Error)
logger.V(4).Info("Reactor found matching error", "index", i, "expectedResource", expected.Resource, "expectedVerb", expected.Verb, "err", expected.Error)
return expected.Error
}
}
@ -382,7 +385,7 @@ func (r *VolumeReactor) CheckClaims(expectedClaims []*v1.PersistentVolumeClaim)
// PopChange returns one recorded updated object, either *v1.PersistentVolume
// or *v1.PersistentVolumeClaim. Returns nil when there are no changes.
func (r *VolumeReactor) PopChange() interface{} {
func (r *VolumeReactor) PopChange(ctx context.Context) interface{} {
r.lock.Lock()
defer r.lock.Unlock()
@ -391,14 +394,15 @@ func (r *VolumeReactor) PopChange() interface{} {
}
// For debugging purposes, print the queue
logger := klog.FromContext(ctx)
for _, obj := range r.changedObjects {
switch obj.(type) {
case *v1.PersistentVolume:
vol, _ := obj.(*v1.PersistentVolume)
klog.V(4).Infof("reactor queue: %s", vol.Name)
logger.V(4).Info("Reactor queue", "volumeName", vol.Name)
case *v1.PersistentVolumeClaim:
claim, _ := obj.(*v1.PersistentVolumeClaim)
klog.V(4).Infof("reactor queue: %s", claim.Name)
logger.V(4).Info("Reactor queue", "PVC", klog.KObj(claim))
}
}
@ -539,7 +543,7 @@ func (r *VolumeReactor) MarkVolumeAvailable(name string) {
}
// NewVolumeReactor creates a volume reactor.
func NewVolumeReactor(client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []ReactorError) *VolumeReactor {
func NewVolumeReactor(ctx context.Context, client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []ReactorError) *VolumeReactor {
reactor := &VolumeReactor{
volumes: make(map[string]*v1.PersistentVolume),
claims: make(map[string]*v1.PersistentVolumeClaim),
@ -548,13 +552,30 @@ func NewVolumeReactor(client *fake.Clientset, fakeVolumeWatch, fakeClaimWatch *w
errors: errors,
watchers: make(map[schema.GroupVersionResource]map[string][]*watch.RaceFreeFakeWatcher),
}
client.AddReactor("create", "persistentvolumes", reactor.React)
client.AddReactor("create", "persistentvolumeclaims", reactor.React)
client.AddReactor("update", "persistentvolumes", reactor.React)
client.AddReactor("update", "persistentvolumeclaims", reactor.React)
client.AddReactor("get", "persistentvolumes", reactor.React)
client.AddReactor("get", "persistentvolumeclaims", reactor.React)
client.AddReactor("delete", "persistentvolumes", reactor.React)
client.AddReactor("delete", "persistentvolumeclaims", reactor.React)
client.AddReactor("create", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("create", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("update", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("get", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("delete", "persistentvolumes", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
client.AddReactor("delete", "persistentvolumeclaims", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return reactor.React(ctx, action)
})
return reactor
}
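
The closures above exist because fake.Clientset's ReactionFunc signature has no context parameter; the test context is captured in the closure instead. A condensed sketch of the same adapter for a single verb/resource pair, with handle standing in for React:

package example

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

// register wires one verb/resource pair; handle stands in for React.
func register(ctx context.Context, client *fake.Clientset,
	handle func(ctx context.Context, action core.Action) (bool, runtime.Object, error)) {
	client.AddReactor("get", "persistentvolumes",
		func(action core.Action) (handled bool, ret runtime.Object, err error) {
			// The ReactionFunc signature has no context, so capture it here.
			return handle(ctx, action)
		})
}
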

View File

@ -123,7 +123,7 @@ func (ctrl *PersistentVolumeController) GetServiceAccountTokenFunc() func(_, _ s
func (ctrl *PersistentVolumeController) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {
klog.Errorf("DeleteServiceAccountToken unsupported in PersistentVolumeController")
klog.ErrorS(nil, "DeleteServiceAccountToken unsupported in PersistentVolumeController")
}
}

View File

@ -55,7 +55,7 @@ type Controller struct {
}
// NewPVCProtectionController returns a new instance of PVCProtectionController.
func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) {
func NewPVCProtectionController(logger klog.Logger, pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface) (*Controller, error) {
e := &Controller{
client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
@ -64,9 +64,11 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
e.pvcLister = pvcInformer.Lister()
e.pvcListerSynced = pvcInformer.Informer().HasSynced
pvcInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.pvcAddedUpdated,
AddFunc: func(obj interface{}) {
e.pvcAddedUpdated(logger, obj)
},
UpdateFunc: func(old, new interface{}) {
e.pvcAddedUpdated(new)
e.pvcAddedUpdated(logger, new)
},
})
@ -78,13 +80,13 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
}
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
e.podAddedDeletedUpdated(nil, obj, false)
e.podAddedDeletedUpdated(logger, nil, obj, false)
},
DeleteFunc: func(obj interface{}) {
e.podAddedDeletedUpdated(nil, obj, true)
e.podAddedDeletedUpdated(logger, nil, obj, true)
},
UpdateFunc: func(old, new interface{}) {
e.podAddedDeletedUpdated(old, new, false)
e.podAddedDeletedUpdated(logger, old, new, false)
},
})
@ -96,8 +98,9 @@ func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.InfoS("Starting PVC protection controller")
defer klog.InfoS("Shutting down PVC protection controller")
logger := klog.FromContext(ctx)
logger.Info("Starting PVC protection controller")
defer logger.Info("Shutting down PVC protection controller")
if !cache.WaitForNamedCacheSync("PVC protection", ctx.Done(), c.pvcListerSynced, c.podListerSynced) {
return
@ -142,15 +145,16 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
}
func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName string) error {
klog.V(4).InfoS("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName))
logger := klog.FromContext(ctx)
logger.V(4).Info("Processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName))
startTime := time.Now()
defer func() {
klog.V(4).InfoS("Finished processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName), "duration", time.Since(startTime))
logger.V(4).Info("Finished processing PVC", "PVC", klog.KRef(pvcNamespace, pvcName), "duration", time.Since(startTime))
}()
pvc, err := c.pvcLister.PersistentVolumeClaims(pvcNamespace).Get(pvcName)
if apierrors.IsNotFound(err) {
klog.V(4).InfoS("PVC not found, ignoring", "PVC", klog.KRef(pvcNamespace, pvcName))
logger.V(4).Info("PVC not found, ignoring", "PVC", klog.KRef(pvcNamespace, pvcName))
return nil
}
if err != nil {
@ -167,7 +171,7 @@ func (c *Controller) processPVC(ctx context.Context, pvcNamespace, pvcName strin
if !isUsed {
return c.removeFinalizer(ctx, pvc)
}
klog.V(2).InfoS("Keeping PVC because it is being used", "PVC", klog.KObj(pvc))
logger.V(2).Info("Keeping PVC because it is being used", "PVC", klog.KObj(pvc))
}
if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) {
@ -184,11 +188,12 @@ func (c *Controller) addFinalizer(ctx context.Context, pvc *v1.PersistentVolumeC
claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = append(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil {
klog.ErrorS(err, "Error adding protection finalizer to PVC", "PVC", klog.KObj(pvc))
logger.Error(err, "Error adding protection finalizer to PVC", "PVC", klog.KObj(pvc))
return err
}
klog.V(3).InfoS("Added protection finalizer to PVC", "PVC", klog.KObj(pvc))
logger.V(3).Info("Added protection finalizer to PVC", "PVC", klog.KObj(pvc))
return nil
}
@ -196,11 +201,12 @@ func (c *Controller) removeFinalizer(ctx context.Context, pvc *v1.PersistentVolu
claimClone := pvc.DeepCopy()
claimClone.ObjectMeta.Finalizers = slice.RemoveString(claimClone.ObjectMeta.Finalizers, volumeutil.PVCProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumeClaims(claimClone.Namespace).Update(ctx, claimClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil {
klog.ErrorS(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc))
logger.Error(err, "Error removing protection finalizer from PVC", "PVC", klog.KObj(pvc))
return err
}
klog.V(3).InfoS("Removed protection finalizer from PVC", "PVC", klog.KObj(pvc))
logger.V(3).Info("Removed protection finalizer from PVC", "PVC", klog.KObj(pvc))
return nil
}
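
addFinalizer and removeFinalizer implement the usual protection-finalizer dance: the finalizer blocks deletion while the PVC is in use, and removing it lets the API server finish the delete. A self-contained sketch of the list manipulation only, without the DeepCopy and API round-trip the controller performs:

package main

import "fmt"

// protectionFinalizer matches volumeutil.PVCProtectionFinalizer.
const protectionFinalizer = "kubernetes.io/pvc-protection"

// addFinalizer appends the finalizer only if it is not present yet.
func addFinalizer(finalizers []string) []string {
	for _, f := range finalizers {
		if f == protectionFinalizer {
			return finalizers
		}
	}
	return append(finalizers, protectionFinalizer)
}

// removeFinalizer drops the finalizer; once no finalizers remain, the API
// server is free to complete the object's deletion.
func removeFinalizer(finalizers []string) []string {
	var out []string
	for _, f := range finalizers {
		if f != protectionFinalizer {
			out = append(out, f)
		}
	}
	return out
}

func main() {
	f := addFinalizer(nil)
	fmt.Println(f)                  // [kubernetes.io/pvc-protection]
	fmt.Println(removeFinalizer(f)) // []
}
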
@ -208,9 +214,10 @@ func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeCl
// Look for a Pod using pvc in the Informer's cache. If one is found the
// correct decision to keep pvc is taken without doing an expensive live
// list.
if inUse, err := c.askInformer(pvc); err != nil {
logger := klog.FromContext(ctx)
if inUse, err := c.askInformer(logger, pvc); err != nil {
// No need to return because a live list will follow.
klog.Error(err)
logger.Error(err, "")
} else if inUse {
return true, nil
}
@ -222,8 +229,8 @@ func (c *Controller) isBeingUsed(ctx context.Context, pvc *v1.PersistentVolumeCl
return c.askAPIServer(ctx, pvc)
}
func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
klog.V(4).InfoS("Looking for Pods using PVC in the Informer's cache", "PVC", klog.KObj(pvc))
func (c *Controller) askInformer(logger klog.Logger, pvc *v1.PersistentVolumeClaim) (bool, error) {
logger.V(4).Info("Looking for Pods using PVC in the Informer's cache", "PVC", klog.KObj(pvc))
// The indexer is used to find pods which might use the PVC.
objs, err := c.podIndexer.ByIndex(common.PodPVCIndex, fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name))
@ -239,17 +246,18 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
// We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim,
// but for volume.Ephemeral we need to be sure that this particular PVC is the one
// created for the ephemeral volume.
if c.podUsesPVC(pod, pvc) {
if c.podUsesPVC(logger, pod, pvc) {
return true, nil
}
}
klog.V(4).InfoS("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc))
logger.V(4).Info("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc))
return false, nil
}
func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeClaim) (bool, error) {
klog.V(4).InfoS("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc))
logger := klog.FromContext(ctx)
logger.V(4).Info("Looking for Pods using PVC with a live list", "PVC", klog.KObj(pvc))
podsList, err := c.client.CoreV1().Pods(pvc.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
@ -257,16 +265,16 @@ func (c *Controller) askAPIServer(ctx context.Context, pvc *v1.PersistentVolumeC
}
for _, pod := range podsList.Items {
if c.podUsesPVC(&pod, pvc) {
if c.podUsesPVC(logger, &pod, pvc) {
return true, nil
}
}
klog.V(2).InfoS("PVC is unused", "PVC", klog.KObj(pvc))
logger.V(2).Info("PVC is unused", "PVC", klog.KObj(pvc))
return false, nil
}
func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool {
func (c *Controller) podUsesPVC(logger klog.Logger, pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool {
// Check whether pvc is used by pod only if pod is scheduled, because
// kubelet sees pods after they have been scheduled and it won't allow
// starting a pod referencing a PVC with a non-nil deletionTimestamp.
@ -274,7 +282,7 @@ func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc.Name ||
!podIsShutDown(pod) && volume.Ephemeral != nil && ephemeral.VolumeClaimName(pod, &volume) == pvc.Name && ephemeral.VolumeIsForPod(pod, pvc) == nil {
klog.V(2).InfoS("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
logger.V(2).Info("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
return true
}
}
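
Editor's note: the key/value pairs survive the conversion unchanged because klog.KObj behaves the same under both APIs. A small runnable sketch of what it emits (types from k8s.io/api; the logger setup is illustrative):

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background()
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}
	pvc := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "data-web-0"}}

	// klog.KObj renders as "default/web-0" in text output and as a
	// name/namespace object in JSON output, so both grepping and
	// structured queries keep working. Gated behind verbosity 2, like
	// the call site above.
	logger.V(2).Info("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
}
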
@ -313,7 +321,7 @@ func podIsShutDown(pod *v1.Pod) bool {
}
// pvcAddedUpdated reacts to pvc added/updated events
func (c *Controller) pvcAddedUpdated(obj interface{}) {
func (c *Controller) pvcAddedUpdated(logger klog.Logger, obj interface{}) {
pvc, ok := obj.(*v1.PersistentVolumeClaim)
if !ok {
utilruntime.HandleError(fmt.Errorf("PVC informer returned non-PVC object: %#v", obj))
@ -324,7 +332,7 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("couldn't get key for Persistent Volume Claim %#v: %v", pvc, err))
return
}
klog.V(4).InfoS("Got event on PVC", "pvc", klog.KObj(pvc))
logger.V(4).Info("Got event on PVC", "pvc", klog.KObj(pvc))
if protectionutil.NeedToAddFinalizer(pvc, volumeutil.PVCProtectionFinalizer) || protectionutil.IsDeletionCandidate(pvc, volumeutil.PVCProtectionFinalizer) {
c.queue.Add(key)
@ -332,9 +340,9 @@ func (c *Controller) pvcAddedUpdated(obj interface{}) {
}
// podAddedDeletedUpdated reacts to Pod events
func (c *Controller) podAddedDeletedUpdated(old, new interface{}, deleted bool) {
func (c *Controller) podAddedDeletedUpdated(logger klog.Logger, old, new interface{}, deleted bool) {
if pod := c.parsePod(new); pod != nil {
c.enqueuePVCs(pod, deleted)
c.enqueuePVCs(logger, pod, deleted)
// An update notification might mask the deletion of a pod X and the
// following creation of a pod Y with the same namespaced name as X. If
@ -342,7 +350,7 @@ func (c *Controller) podAddedDeletedUpdated(old, new interface{}, deleted bool)
// where it is blocking deletion of a PVC not referenced by Y, otherwise
// such PVC will never be deleted.
if oldPod := c.parsePod(old); oldPod != nil && oldPod.UID != pod.UID {
c.enqueuePVCs(oldPod, true)
c.enqueuePVCs(logger, oldPod, true)
}
}
}
@ -367,13 +375,13 @@ func (*Controller) parsePod(obj interface{}) *v1.Pod {
return pod
}
func (c *Controller) enqueuePVCs(pod *v1.Pod, deleted bool) {
func (c *Controller) enqueuePVCs(logger klog.Logger, pod *v1.Pod, deleted bool) {
// Filter out pods that can't help us to remove a finalizer on PVC
if !deleted && !volumeutil.IsPodTerminated(pod, pod.Status) && pod.Spec.NodeName != "" {
return
}
klog.V(4).InfoS("Enqueuing PVCs for Pod", "pod", klog.KObj(pod), "podUID", pod.UID)
logger.V(4).Info("Enqueuing PVCs for Pod", "pod", klog.KObj(pod), "podUID", pod.UID)
// Enqueue all PVCs that the pod uses
for _, volume := range pod.Spec.Volumes {

View File

@ -36,6 +36,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@ -399,7 +400,8 @@ func TestPVCProtectionController(t *testing.T) {
podInformer := informers.Core().V1().Pods()
// Create the controller
ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client)
logger, _ := ktesting.NewTestContext(t)
ctrl, err := NewPVCProtectionController(logger, pvcInformer, podInformer, client)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -424,15 +426,15 @@ func TestPVCProtectionController(t *testing.T) {
// Start the test by simulating an event
if test.updatedPVC != nil {
ctrl.pvcAddedUpdated(test.updatedPVC)
ctrl.pvcAddedUpdated(logger, test.updatedPVC)
}
switch {
case test.deletedPod != nil && test.updatedPod != nil && test.deletedPod.Namespace == test.updatedPod.Namespace && test.deletedPod.Name == test.updatedPod.Name:
ctrl.podAddedDeletedUpdated(test.deletedPod, test.updatedPod, false)
ctrl.podAddedDeletedUpdated(logger, test.deletedPod, test.updatedPod, false)
case test.updatedPod != nil:
ctrl.podAddedDeletedUpdated(nil, test.updatedPod, false)
ctrl.podAddedDeletedUpdated(logger, nil, test.updatedPod, false)
case test.deletedPod != nil:
ctrl.podAddedDeletedUpdated(nil, test.deletedPod, true)
ctrl.podAddedDeletedUpdated(logger, nil, test.deletedPod, true)
}
// Process the controller queue until we get expected results
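
Editor's note: for unit tests, ktesting.NewTestContext supplies both halves of the pattern — a logger whose output is routed through t.Log, so it is attributed to the right test, and a context wrapping that same logger for code that expects contextual logging. A sketch of typical use, with doSomething as a hypothetical function under test:

package example

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

func doSomething(ctx context.Context) {
	klog.FromContext(ctx).V(4).Info("inside the code under test")
}

func TestWithContextualLogging(t *testing.T) {
	// Returns (logger, ctx); tests that only need one half discard the
	// other, as in the hunks above.
	logger, ctx := ktesting.NewTestContext(t)

	logger.Info("driving the test", "step", 1)
	doSomething(ctx)
}
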

View File

@ -49,7 +49,7 @@ type Controller struct {
}
// NewPVProtectionController returns a new *Controller.
func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
func NewPVProtectionController(logger klog.Logger, pvInformer coreinformers.PersistentVolumeInformer, cl clientset.Interface) *Controller {
e := &Controller{
client: cl,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvprotection"),
@ -58,9 +58,11 @@ func NewPVProtectionController(pvInformer coreinformers.PersistentVolumeInformer
e.pvLister = pvInformer.Lister()
e.pvListerSynced = pvInformer.Informer().HasSynced
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: e.pvAddedUpdated,
AddFunc: func(obj interface{}) {
e.pvAddedUpdated(logger, obj)
},
UpdateFunc: func(old, new interface{}) {
e.pvAddedUpdated(new)
e.pvAddedUpdated(logger, new)
},
})
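
Editor's note: client-go's event-handler signatures are fixed and carry no context, so the constructor captures the logger in closures instead — the pattern used for pvAddedUpdated above. A condensed sketch, with onEvent standing in for the real handler:

package example

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
)

// newHandlers injects the construction-time logger into callbacks whose
// signatures client-go does not let us change.
func newHandlers(logger klog.Logger, onEvent func(klog.Logger, interface{})) cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			onEvent(logger, obj)
		},
		UpdateFunc: func(old, new interface{}) {
			// Add and update funnel into the same handler; only the new
			// object matters for finalizer bookkeeping.
			onEvent(logger, new)
		},
	}
}
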
@ -72,8 +74,9 @@ func (c *Controller) Run(ctx context.Context, workers int) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
klog.Infof("Starting PV protection controller")
defer klog.Infof("Shutting down PV protection controller")
logger := klog.FromContext(ctx)
logger.Info("Starting PV protection controller")
defer logger.Info("Shutting down PV protection controller")
if !cache.WaitForNamedCacheSync("PV protection", ctx.Done(), c.pvListerSynced) {
return
@ -114,15 +117,16 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
}
func (c *Controller) processPV(ctx context.Context, pvName string) error {
klog.V(4).Infof("Processing PV %s", pvName)
logger := klog.FromContext(ctx)
logger.V(4).Info("Processing PV", "PV", klog.KRef("", pvName))
startTime := time.Now()
defer func() {
klog.V(4).Infof("Finished processing PV %s (%v)", pvName, time.Since(startTime))
logger.V(4).Info("Finished processing PV", "PV", klog.KRef("", pvName), "cost", time.Since(startTime))
}()
pv, err := c.pvLister.Get(pvName)
if apierrors.IsNotFound(err) {
klog.V(4).Infof("PV %s not found, ignoring", pvName)
logger.V(4).Info("PV not found, ignoring", "PV", klog.KRef("", pvName))
return nil
}
if err != nil {
@ -136,7 +140,7 @@ func (c *Controller) processPV(ctx context.Context, pvName string) error {
if !isUsed {
return c.removeFinalizer(ctx, pv)
}
klog.V(4).Infof("Keeping PV %s because it is being used", pvName)
logger.V(4).Info("Keeping PV because it is being used", "PV", klog.KRef("", pvName))
}
if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) {
@ -153,11 +157,12 @@ func (c *Controller) addFinalizer(ctx context.Context, pv *v1.PersistentVolume)
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = append(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer)
_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil {
klog.V(3).Infof("Error adding protection finalizer to PV %s: %v", pv.Name, err)
logger.V(3).Info("Error adding protection finalizer to PV", "PV", klog.KObj(pv), "err", err)
return err
}
klog.V(3).Infof("Added protection finalizer to PV %s", pv.Name)
logger.V(3).Info("Added protection finalizer to PV", "PV", klog.KObj(pv))
return nil
}
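
Editor's note: observe the conversion choice in this hunk — the old klog.V(3).Infof error lines stay verbosity-gated Info calls with the error attached under an err key, rather than becoming logger.Error, which logr emits regardless of verbosity. A sketch of the behavioral difference:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	logger := klog.Background()
	err := errors.New("update conflict")

	// Gated: silent unless klog verbosity is raised to 3 or higher,
	// matching the old klog.V(3).Infof behavior of these call sites.
	logger.V(3).Info("Error adding protection finalizer to PV", "PV", klog.KRef("", "pv-1"), "err", err)

	// Ungated: always emitted, at error severity.
	logger.Error(err, "Error adding protection finalizer to PV", "PV", klog.KRef("", "pv-1"))
}
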
@ -165,11 +170,12 @@ func (c *Controller) removeFinalizer(ctx context.Context, pv *v1.PersistentVolum
pvClone := pv.DeepCopy()
pvClone.ObjectMeta.Finalizers = slice.RemoveString(pvClone.ObjectMeta.Finalizers, volumeutil.PVProtectionFinalizer, nil)
_, err := c.client.CoreV1().PersistentVolumes().Update(ctx, pvClone, metav1.UpdateOptions{})
logger := klog.FromContext(ctx)
if err != nil {
klog.V(3).Infof("Error removing protection finalizer from PV %s: %v", pv.Name, err)
logger.V(3).Info("Error removing protection finalizer from PV", "PV", klog.KObj(pv), "err", err)
return err
}
klog.V(3).Infof("Removed protection finalizer from PV %s", pv.Name)
logger.V(3).Info("Removed protection finalizer from PV", "PV", klog.KObj(pv))
return nil
}
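
Editor's note: earlier in processPV only the volume's name is in hand, so the logging uses klog.KRef, which builds the same structured reference as klog.KObj from a bare namespace/name pair; PersistentVolumes are cluster-scoped, hence the empty namespace. A sketch:

package main

import (
	"time"

	"k8s.io/klog/v2"
)

func processPV(logger klog.Logger, pvName string) {
	// KRef is the pre-fetch counterpart of KObj: same "PV" key, same
	// rendered reference, no object required yet.
	logger.V(4).Info("Processing PV", "PV", klog.KRef("", pvName))
	start := time.Now()
	defer func() {
		logger.V(4).Info("Finished processing PV", "PV", klog.KRef("", pvName), "cost", time.Since(start))
	}()
	// ... fetch the PV and reconcile its finalizers here ...
}

func main() {
	processPV(klog.Background(), "pv-demo")
}
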
@ -185,13 +191,13 @@ func (c *Controller) isBeingUsed(pv *v1.PersistentVolume) bool {
}
// pvAddedUpdated reacts to pv added/updated events
func (c *Controller) pvAddedUpdated(obj interface{}) {
func (c *Controller) pvAddedUpdated(logger klog.Logger, obj interface{}) {
pv, ok := obj.(*v1.PersistentVolume)
if !ok {
utilruntime.HandleError(fmt.Errorf("PV informer returned non-PV object: %#v", obj))
return
}
klog.V(4).Infof("Got event on PV %s", pv.Name)
logger.V(4).Info("Got event on PV", "PV", klog.KObj(pv))
if protectionutil.NeedToAddFinalizer(pv, volumeutil.PVProtectionFinalizer) || protectionutil.IsDeletionCandidate(pv, volumeutil.PVProtectionFinalizer) {
c.queue.Add(pv.Name)

View File

@ -35,6 +35,7 @@ import (
"k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@ -210,11 +211,12 @@ func TestPVProtectionController(t *testing.T) {
}
// Create the controller
ctrl := NewPVProtectionController(pvInformer, client)
logger, _ := ktesting.NewTestContext(t)
ctrl := NewPVProtectionController(logger, pvInformer, client)
// Start the test by simulating an event
if test.updatedPV != nil {
ctrl.pvAddedUpdated(test.updatedPV)
ctrl.pvAddedUpdated(logger, test.updatedPV)
}
// Process the controller queue until we get expected results

View File

@ -359,12 +359,13 @@ type mountedPod struct {
}
func (asw *actualStateOfWorld) MarkVolumeAsAttached(
logger klog.Logger,
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName, devicePath string) error {
return asw.addVolume(volumeName, volumeSpec, devicePath)
}
func (asw *actualStateOfWorld) MarkVolumeAsUncertain(
volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error {
logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, _ types.NodeName) error {
return nil
}
@ -473,7 +474,7 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted(markVolumeOpts operationexecu
return asw.AddPodToVolume(markVolumeOpts)
}
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// no operation for kubelet side
}
@ -770,7 +771,7 @@ func (asw *actualStateOfWorld) SetDeviceMountState(
return nil
}
func (asw *actualStateOfWorld) InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) {
func (asw *actualStateOfWorld) InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity) {
asw.Lock()
defer asw.Unlock()
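
Editor's note: fakes and test doubles only need their method sets widened to match; a fake that never logs accepts the logger and ignores it, as the kubelet-side actualStateOfWorld above does. A minimal sketch of that shape — the interface and parameters are simplified stand-ins, not the real operationexecutor types:

package example

import "k8s.io/klog/v2"

type AttacherUpdater interface {
	MarkVolumeAsAttached(logger klog.Logger, volumeName, devicePath string) error
}

// fakeUpdater satisfies the widened interface without using the logger;
// keeping the parameter named (rather than _) mirrors the real fakes.
type fakeUpdater struct{}

func (f *fakeUpdater) MarkVolumeAsAttached(logger klog.Logger, volumeName, devicePath string) error {
	return nil // no-op on the kubelet side
}
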

View File

@ -28,6 +28,7 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@ -71,7 +72,8 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
}
// Act
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert
if err != nil {
@ -115,7 +117,8 @@ func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing.
volumeName := v1.UniqueVolumeName("this-would-never-be-a-volume-name")
// Act
err := asw.MarkVolumeAsAttached(volumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err := asw.MarkVolumeAsAttached(logger, volumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert
if err != nil {
@ -159,14 +162,14 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
// Act
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
// Assert
if err != nil {
@ -210,8 +213,8 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -286,8 +289,8 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -394,8 +397,8 @@ func Test_AddTwoPodsToVolume_Positive(t *testing.T) {
generatedVolumeName1,
generatedVolumeName2, volumeSpec1, volumeSpec2)
}
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -534,8 +537,8 @@ func TestActualStateOfWorld_FoundDuringReconstruction(t *testing.T) {
generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec1)
require.NoError(t, err)
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -611,8 +614,9 @@ func Test_MarkVolumeAsDetached_Negative_PodInVolume(t *testing.T) {
},
},
}
logger, _ := ktesting.NewTestContext(t)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
err := asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
err := asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -801,8 +805,8 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -854,8 +858,8 @@ func Test_AddPodToVolume_Positive_SELinux(t *testing.T) {
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -933,8 +937,8 @@ func Test_MarkDeviceAsMounted_Positive_SELinux(t *testing.T) {
if err != nil {
t.Fatalf("GetUniqueVolumeNameFromSpec failed. Expected: <no error> Actual: <%v>", err)
}
err = asw.MarkVolumeAsAttached(emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, emptyVolumeName, volumeSpec, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}
@ -980,8 +984,8 @@ func TestUncertainVolumeMounts(t *testing.T) {
generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec(
plugin, volumeSpec1)
require.NoError(t, err)
err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package metrics
import (
"k8s.io/klog/v2/ktesting"
"testing"
v1 "k8s.io/api/core/v1"
@ -74,7 +75,8 @@ func TestMetricCollection(t *testing.T) {
// Add one volume to ActualStateOfWorld
devicePath := "fake/device/path"
err = asw.MarkVolumeAsAttached("", volumeSpec, "", devicePath)
logger, _ := ktesting.NewTestContext(t)
err = asw.MarkVolumeAsAttached(logger, "", volumeSpec, "", devicePath)
if err != nil {
t.Fatalf("MarkVolumeAsAttached failed. Expected: <no error> Actual: <%v>", err)
}

View File

@ -374,7 +374,7 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
dswp.desiredStateOfWorld.UpdatePersistentVolumeSize(uniqueVolumeName, pvCap)
// in case the actualStateOfWorld was rebuild after kubelet restart ensure that claimSize is set to accurate value
dswp.actualStateOfWorld.InitializeClaimSize(uniqueVolumeName, pvcStatusCap)
dswp.actualStateOfWorld.InitializeClaimSize(klog.TODO(), uniqueVolumeName, pvcStatusCap)
}
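
Editor's note: where the caller itself has not been migrated and has neither a context nor a logger to hand down, the commit plugs the gap with klog.TODO(), which returns the global fallback logger and doubles as a grep-able marker that the call site still needs a real logger threaded through. A sketch, with initializeClaimSize as a hypothetical already-migrated callee:

package main

import "k8s.io/klog/v2"

// initializeClaimSize stands in for a callee that now demands a logger.
func initializeClaimSize(logger klog.Logger, volumeName string) {
	logger.V(4).Info("Initializing claim size", "volumeName", volumeName)
}

func main() {
	// klog.TODO(): usable today, flagged for follow-up tomorrow.
	initializeClaimSize(klog.TODO(), "kubernetes.io/gce-pd/disk-1")
}
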
func getUniqueVolumeName(

View File

@ -17,6 +17,7 @@ limitations under the License.
package populator
import (
"k8s.io/klog/v2/ktesting"
"testing"
"time"
@ -134,7 +135,8 @@ func TestFindAndAddNewPods_WithRescontructedVolume(t *testing.T) {
VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false),
VolumeMountState: operationexecutor.VolumeMounted,
}
dswp.actualStateOfWorld.MarkVolumeAsAttached(opts.VolumeName, opts.VolumeSpec, "fake-node", "")
logger, _ := ktesting.NewTestContext(t)
dswp.actualStateOfWorld.MarkVolumeAsAttached(logger, opts.VolumeName, opts.VolumeSpec, "fake-node", "")
dswp.actualStateOfWorld.MarkVolumeAsMounted(opts)
dswp.findAndAddNewPods()
@ -1393,8 +1395,9 @@ func volumeCapacity(size int) v1.ResourceList {
}
func reconcileASW(asw cache.ActualStateOfWorld, dsw cache.DesiredStateOfWorld, t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
for _, volumeToMount := range dsw.GetVolumesToMount() {
err := asw.MarkVolumeAsAttached(volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "")
err := asw.MarkVolumeAsAttached(logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, "", "")
if err != nil {
t.Fatalf("Unexpected error when MarkVolumeAsAttached: %v", err)
}

View File

@ -233,6 +233,7 @@ func (rc *reconciler) mountAttachedVolumes(volumeToMount cache.VolumeToMount, po
}
func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
logger := klog.TODO()
if rc.controllerAttachDetachEnabled || !volumeToMount.PluginIsAttachable {
//// lets not spin a goroutine and unnecessarily trigger exponential backoff if this happens
if volumeToMount.PluginIsAttachable && !volumeToMount.ReportedInUse {
@ -243,6 +244,7 @@ func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
// for controller to finish attaching volume.
klog.V(5).InfoS(volumeToMount.GenerateMsgDetailed("Starting operationExecutor.VerifyControllerAttachedVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
err := rc.operationExecutor.VerifyControllerAttachedVolume(
logger,
volumeToMount.VolumeToMount,
rc.nodeName,
rc.actualStateOfWorld)
@ -261,7 +263,7 @@ func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
NodeName: rc.nodeName,
}
klog.V(5).InfoS(volumeToAttach.GenerateMsgDetailed("Starting operationExecutor.AttachVolume", ""), "pod", klog.KObj(volumeToMount.Pod))
err := rc.operationExecutor.AttachVolume(volumeToAttach, rc.actualStateOfWorld)
err := rc.operationExecutor.AttachVolume(logger, volumeToAttach, rc.actualStateOfWorld)
if err != nil && !isExpectedError(err) {
klog.ErrorS(err, volumeToMount.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.AttachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error(), "pod", klog.KObj(volumeToMount.Pod))
}
@ -297,7 +299,7 @@ func (rc *reconciler) unmountDetachDevices() {
// Only detach if kubelet detach is enabled
klog.V(5).InfoS(attachedVolume.GenerateMsgDetailed("Starting operationExecutor.DetachVolume", ""))
err := rc.operationExecutor.DetachVolume(
attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
klog.TODO(), attachedVolume.AttachedVolume, false /* verifySafeToDetach */, rc.actualStateOfWorld)
if err != nil && !isExpectedError(err) {
klog.ErrorS(err, attachedVolume.GenerateErrorDetailed(fmt.Sprintf("operationExecutor.DetachVolume failed (controllerAttachDetachEnabled %v)", rc.controllerAttachDetachEnabled), err).Error())
}

View File

@ -40,6 +40,7 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@ -2425,7 +2426,7 @@ func TestSyncStates(t *testing.T) {
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
rcInstance, _ := rc.(*reconciler)
logger, _ := ktesting.NewTestContext(t)
for _, tpodInfo := range tc.podInfos {
pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
@ -2435,7 +2436,7 @@ func TestSyncStates(t *testing.T) {
if err != nil {
t.Fatalf("error adding volume %s to dsow: %v", volumeSpec.Name(), err)
}
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "")
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
}
rcInstance.syncStates(tmpKubeletPodDir)

View File

@ -140,7 +140,7 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*gl
for _, gvl := range volumesNeedUpdate {
err := rc.actualStateOfWorld.MarkVolumeAsAttached(
//TODO: the devicePath might not be correct for some volume plugins: see issue #54108
gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
if err != nil {
klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
continue

View File

@ -107,7 +107,7 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa
for _, gvl := range reconstructedVolumes {
err := rc.actualStateOfWorld.MarkVolumeAsAttached(
//TODO: the devicePath might not be correct for some volume plugins: see issue #54108
gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
if err != nil {
klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
continue

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@ -205,7 +206,7 @@ func TestCleanOrphanVolumes(t *testing.T) {
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
rcInstance, _ := rc.(*reconciler)
rcInstance.volumesFailedReconstruction = tc.volumesFailedReconstruction
logger, _ := ktesting.NewTestContext(t)
for _, tpodInfo := range tc.podInfos {
pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
@ -215,7 +216,7 @@ func TestCleanOrphanVolumes(t *testing.T) {
if err != nil {
t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err)
}
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "")
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
}
// Act
@ -324,7 +325,8 @@ func TestReconstructVolumesMount(t *testing.T) {
if err != nil {
t.Fatalf("Error adding volume %s to dsow: %v", volumeSpec.Name(), err)
}
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "")
logger, _ := ktesting.NewTestContext(t)
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
rcInstance.populatorHasAddedPods = func() bool {
// Mark DSW populated to allow unmounting of volumes.

View File

@ -43,6 +43,7 @@ import (
k8stesting "k8s.io/client-go/testing"
"k8s.io/component-helpers/storage/volume"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/controller"
pvtesting "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
)
@ -150,7 +151,8 @@ type testEnv struct {
func newTestBinder(t *testing.T, stopCh <-chan struct{}) *testEnv {
client := &fake.Clientset{}
reactor := pvtesting.NewVolumeReactor(client, nil, nil, nil)
_, ctx := ktesting.NewTestContext(t)
reactor := pvtesting.NewVolumeReactor(ctx, client, nil, nil, nil)
// TODO refactor all tests to use real watch mechanism, see #72327
client.AddWatchReactor("*", func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()

View File

@ -24,7 +24,6 @@ import (
"strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -62,7 +61,7 @@ type azureFileDeleter struct {
azureProvider azureCloudProvider
}
func (plugin *azureFilePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *azureFilePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
azure, resourceGroup, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
klog.V(4).Infof("failed to get azure provider")
@ -102,7 +101,7 @@ func (plugin *azureFilePlugin) newDeleterInternal(spec *volume.Spec, util azureU
}
}
func (plugin *azureFilePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *azureFilePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
azure, resourceGroup, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
klog.V(4).Infof("failed to get azure provider")

View File

@ -228,7 +228,7 @@ func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podU
}}, nil
}
func (plugin *gcePersistentDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *gcePersistentDiskPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &GCEDiskUtil{})
}
@ -245,7 +245,7 @@ func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, man
}}, nil
}
func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *gcePersistentDiskPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &GCEDiskUtil{})
}

View File

@ -18,6 +18,7 @@ package hostpath
import (
"fmt"
"k8s.io/klog/v2"
"os"
"regexp"
@ -172,11 +173,11 @@ func (plugin *hostPathPlugin) Recycle(pvName string, spec *volume.Spec, eventRec
return recyclerclient.RecycleVolumeByWatchingPodUntilCompletion(pvName, pod, plugin.host.GetKubeClient(), eventRecorder)
}
func (plugin *hostPathPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *hostPathPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return newDeleter(spec, plugin.host)
}
func (plugin *hostPathPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *hostPathPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
if !plugin.config.ProvisioningEnabled {
return nil, fmt.Errorf("provisioning in volume plugin %q is disabled", plugin.GetPluginName())
}

View File

@ -27,6 +27,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
@ -111,7 +112,8 @@ func TestDeleter(t *testing.T) {
if err != nil {
t.Fatal("Can't find the plugin by name")
}
deleter, err := plug.NewDeleter(spec)
logger, _ := ktesting.NewTestContext(t)
deleter, err := plug.NewDeleter(logger, spec)
if err != nil {
t.Errorf("Failed to make a new Deleter: %v", err)
}
@ -135,13 +137,13 @@ func TestDeleterTempDir(t *testing.T) {
"not-tmp": {true, "/nottmp"},
"good-tmp": {false, "/tmp/scratch"},
}
logger, _ := ktesting.NewTestContext(t)
for name, test := range tests {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec)
deleter, _ := plug.NewDeleter(logger, spec)
err := deleter.Delete()
if err == nil && test.expectedFailure {
t.Errorf("Expected failure for test '%s' but got nil err", name)
@ -167,7 +169,8 @@ func TestProvisioner(t *testing.T) {
PVC: volumetest.CreateTestPVC("1Gi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
creator, err := plug.NewProvisioner(options)
logger, _ := ktesting.NewTestContext(t)
creator, err := plug.NewProvisioner(logger, options)
if err != nil {
t.Fatalf("Failed to make a new Provisioner: %v", err)
}

View File

@ -213,7 +213,7 @@ type DeletableVolumePlugin interface {
// NewDeleter creates a new volume.Deleter which knows how to delete this
// resource in accordance with the underlying storage provider after the
// volume's release from a claim
NewDeleter(spec *Spec) (Deleter, error)
NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error)
}
// ProvisionableVolumePlugin is an extended interface of VolumePlugin and is
@ -223,7 +223,7 @@ type ProvisionableVolumePlugin interface {
// NewProvisioner creates a new volume.Provisioner which knows how to
// create PersistentVolumes in accordance with the plugin's underlying
// storage provider
NewProvisioner(options VolumeOptions) (Provisioner, error)
NewProvisioner(logger klog.Logger, options VolumeOptions) (Provisioner, error)
}
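
Editor's note: widening a plugin interface like this forces every implementation — in-tree plugins and fakes alike — to change in lock-step, which is why so many files below carry the same one-line signature edits. A reduced sketch of an implementation against the new shape; Spec, Deleter, and the plugin types here are simplified stand-ins for the real k8s.io/kubernetes/pkg/volume interfaces:

package example

import (
	"errors"

	"k8s.io/klog/v2"
)

// Simplified stand-ins for volume.Spec and volume.Deleter.
type Spec struct{ Name string }

type Deleter interface{ Delete() error }

type DeletableVolumePlugin interface {
	NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error)
}

type demoPlugin struct{}

type demoDeleter struct{ name string }

func (d *demoDeleter) Delete() error { return nil }

// NewDeleter now receives the caller's logger; a plugin is free to ignore
// it, but logging through it keeps the caller's names and values attached.
func (p *demoPlugin) NewDeleter(logger klog.Logger, spec *Spec) (Deleter, error) {
	if spec == nil {
		return nil, errors.New("nil spec")
	}
	logger.V(4).Info("creating deleter", "volume", spec.Name)
	return &demoDeleter{name: spec.Name}, nil
}
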
// AttachableVolumePlugin is an extended interface of VolumePlugin and is used for volumes that require attachment

View File

@ -160,7 +160,7 @@ func (plugin *portworxVolumePlugin) newUnmounterInternal(volName string, podUID
}}, nil
}
func (plugin *portworxVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *portworxVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, plugin.util)
}
@ -178,7 +178,7 @@ func (plugin *portworxVolumePlugin) newDeleterInternal(spec *volume.Spec, manage
}}, nil
}
func (plugin *portworxVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *portworxVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, plugin.util)
}

View File

@ -592,7 +592,7 @@ func (plugin *rbdPlugin) getDeviceNameFromOldMountPath(mounter mount.Interface,
return "", fmt.Errorf("can't find source name from mounted path: %s", mountPath)
}
func (plugin *rbdPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *rbdPlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.RBD == nil {
return nil, fmt.Errorf("spec.PersistentVolume.Spec.RBD is nil")
}
@ -615,7 +615,7 @@ func (plugin *rbdPlugin) newDeleterInternal(spec *volume.Spec, admin, secret str
}}, nil
}
func (plugin *rbdPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *rbdPlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &rbdUtil{})
}

View File

@ -18,6 +18,7 @@ package testing
import (
"fmt"
"k8s.io/klog/v2"
"os"
"path/filepath"
goruntime "runtime"
@ -441,11 +442,11 @@ func (plugin *FakeVolumePlugin) Recycle(pvName string, spec *volume.Spec, eventR
return nil
}
func (plugin *FakeVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *FakeVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return &FakeDeleter{"/attributesTransferredFromSpec", volume.MetricsNil{}}, nil
}
func (plugin *FakeVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *FakeVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
plugin.Lock()
defer plugin.Unlock()
plugin.LastProvisionerOptions = options

View File

@ -17,10 +17,11 @@ limitations under the License.
package operationexecutor
import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/pkg/volume"
@ -54,11 +55,11 @@ func (f *fakeOGCounter) GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume,
return f.recordFuncCall("GenerateUnmountVolumeFunc"), nil
}
func (f *fakeOGCounter) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
func (f *fakeOGCounter) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
return f.recordFuncCall("GenerateAttachVolumeFunc")
}
func (f *fakeOGCounter) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
func (f *fakeOGCounter) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateDetachVolumeFunc"), nil
}
@ -70,7 +71,7 @@ func (f *fakeOGCounter) GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume,
return f.recordFuncCall("GenerateUnmountDeviceFunc"), nil
}
func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
func (f *fakeOGCounter) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
return f.recordFuncCall("GenerateVerifyControllerAttachedVolumeFunc"), nil
}

View File

@ -65,7 +65,7 @@ import (
type OperationExecutor interface {
// AttachVolume attaches the volume to the node specified in volumeToAttach.
// It then updates the actual state of the world to reflect that.
AttachVolume(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
AttachVolume(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// VerifyVolumesAreAttachedPerNode verifies the given list of volumes to see whether they are still attached to the node.
// If any volume is not attached right now, it will update the actual state of the world to reflect that.
@ -83,7 +83,7 @@ type OperationExecutor interface {
// that. If verifySafeToDetach is set, a call is made to the fetch the node
// object and it is used to verify that the volume does not exist in Node's
// Status.VolumesInUse list (operation fails with error if it is).
DetachVolume(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
DetachVolume(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// If a volume has 'Filesystem' volumeMode, MountVolume mounts the
// volume to the pod specified in volumeToMount.
@ -139,7 +139,7 @@ type OperationExecutor interface {
// If the volume is not found or there is an error (fetching the node
// object, for example) then an error is returned which triggers exponential
// back off on retries.
VerifyControllerAttachedVolume(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
VerifyControllerAttachedVolume(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) error
// IsOperationPending returns true if an operation for the given volumeName
// and one of podName or nodeName is pending, otherwise it returns false
@ -245,13 +245,13 @@ type ActualStateOfWorldAttacherUpdater interface {
// TODO: in the future, we should be able to remove the volumeName
// argument to this method -- since it is used only for attachable
// volumes. See issue 29695.
MarkVolumeAsAttached(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
MarkVolumeAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error
// Marks the specified volume as *possibly* attached to the specified node.
// If an attach operation fails, the attach/detach controller does not know for certain if the volume is attached or not.
// If the volume name is supplied, that volume name will be used. If not, the
// volume name is computed using the result from querying the plugin.
MarkVolumeAsUncertain(volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error
MarkVolumeAsUncertain(logger klog.Logger, volumeName v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName) error
// Marks the specified volume as detached from the specified node
MarkVolumeAsDetached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
@ -262,10 +262,10 @@ type ActualStateOfWorldAttacherUpdater interface {
// Unmarks the desire to detach for the specified volume (add the volume back to
// the node's volumesToReportAsAttached list)
AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
AddVolumeToReportAsAttached(logger klog.Logger, volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// InitializeClaimSize sets pvc claim size by reading pvc.Status.Capacity
InitializeClaimSize(volumeName v1.UniqueVolumeName, claimSize *resource.Quantity)
InitializeClaimSize(logger klog.Logger, volumeName v1.UniqueVolumeName, claimSize *resource.Quantity)
GetClaimSize(volumeName v1.UniqueVolumeName) *resource.Quantity
}
@ -789,10 +789,11 @@ func (oe *operationExecutor) IsOperationSafeToRetry(
}
func (oe *operationExecutor) AttachVolume(
logger klog.Logger,
volumeToAttach VolumeToAttach,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations :=
oe.operationGenerator.GenerateAttachVolumeFunc(volumeToAttach, actualStateOfWorld)
oe.operationGenerator.GenerateAttachVolumeFunc(logger, volumeToAttach, actualStateOfWorld)
if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
return oe.pendingOperations.Run(
@ -804,11 +805,12 @@ func (oe *operationExecutor) AttachVolume(
}
func (oe *operationExecutor) DetachVolume(
logger klog.Logger,
volumeToDetach AttachedVolume,
verifySafeToDetach bool,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateDetachVolumeFunc(volumeToDetach, verifySafeToDetach, actualStateOfWorld)
oe.operationGenerator.GenerateDetachVolumeFunc(logger, volumeToDetach, verifySafeToDetach, actualStateOfWorld)
if err != nil {
return err
}
@ -1039,11 +1041,12 @@ func (oe *operationExecutor) ExpandInUseVolume(volumeToMount VolumeToMount, actu
}
func (oe *operationExecutor) VerifyControllerAttachedVolume(
logger klog.Logger,
volumeToMount VolumeToMount,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) error {
generatedOperations, err :=
oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(volumeToMount, nodeName, actualStateOfWorld)
oe.operationGenerator.GenerateVerifyControllerAttachedVolumeFunc(logger, volumeToMount, nodeName, actualStateOfWorld)
if err != nil {
return err
}

View File

@ -18,16 +18,18 @@ package operationexecutor
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
"strconv"
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
@ -59,7 +61,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachableAndNonDevi
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
@ -87,7 +89,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins(t *te
volumeName := v1.UniqueVolumeName(pdName)
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
@ -114,7 +116,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForDeviceMountablePlugins(
volumeName := v1.UniqueVolumeName(pdName)
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
@ -209,7 +211,8 @@ func TestOperationExecutor_AttachSingleNodeVolumeConcurrentlyToSameNode(t *testi
},
},
}
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -239,7 +242,8 @@ func TestOperationExecutor_AttachMultiNodeVolumeConcurrentlyToSameNode(t *testin
},
},
}
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -269,7 +273,8 @@ func TestOperationExecutor_AttachSingleNodeVolumeConcurrentlyToDifferentNodes(t
},
},
}
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -297,7 +302,8 @@ func TestOperationExecutor_AttachMultiNodeVolumeConcurrentlyToDifferentNodes(t *
},
},
}
oe.AttachVolume(volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.AttachVolume(logger, volumesToAttach[i], nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -327,7 +333,8 @@ func TestOperationExecutor_DetachSingleNodeVolumeConcurrentlyFromSameNode(t *tes
},
},
}
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -357,7 +364,8 @@ func TestOperationExecutor_DetachMultiNodeVolumeConcurrentlyFromSameNode(t *test
},
},
}
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -385,7 +393,8 @@ func TestOperationExecutor_DetachMultiNodeVolumeConcurrentlyFromDifferentNodes(t
},
},
}
oe.DetachVolume(attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.DetachVolume(logger, attachedVolumes[i], true /* verifySafeToDetach */, nil /* actualStateOfWorldAttacherUpdater */)
}
// Assert
@ -440,7 +449,8 @@ func TestOperationExecutor_VerifyControllerAttachedVolumeConcurrently(t *testing
volumesToMount[i] = VolumeToMount{
VolumeName: v1.UniqueVolumeName(pdName),
}
oe.VerifyControllerAttachedVolume(volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */)
logger, _ := ktesting.NewTestContext(t)
oe.VerifyControllerAttachedVolume(logger, volumesToMount[i], types.NodeName("node-name"), nil /* actualStateOfWorldMounterUpdater */)
}
// Assert
@ -460,7 +470,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForNonAttachablePlugins_Vo
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithSecret(podName, secretName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
@ -491,7 +501,7 @@ func TestOperationExecutor_MountVolume_ConcurrentMountForAttachablePlugins_Volum
// Act
for i := range volumesToMount {
podName := "pod-" + strconv.Itoa((i + 1))
podName := "pod-" + strconv.Itoa(i+1)
pod := getTestPodWithGCEPD(podName, pdName)
volumesToMount[i] = VolumeToMount{
Pod: pod,
@ -603,7 +613,7 @@ func (fopg *fakeOperationGenerator) GenerateUnmountVolumeFunc(volumeToUnmount Mo
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
opFunc := func() volumetypes.OperationContext {
startOperationAndBlock(fopg.ch, fopg.quit)
return volumetypes.NewOperationContext(nil, nil, false)
@ -612,7 +622,7 @@ func (fopg *fakeOperationGenerator) GenerateAttachVolumeFunc(volumeToAttach Volu
OperationFunc: opFunc,
}
}
func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
func (fopg *fakeOperationGenerator) GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() volumetypes.OperationContext {
startOperationAndBlock(fopg.ch, fopg.quit)
return volumetypes.NewOperationContext(nil, nil, false)
@ -639,7 +649,7 @@ func (fopg *fakeOperationGenerator) GenerateUnmountDeviceFunc(deviceToDetach Att
OperationFunc: opFunc,
}, nil
}
func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
func (fopg *fakeOperationGenerator) GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
opFunc := func() volumetypes.OperationContext {
startOperationAndBlock(fopg.ch, fopg.quit)
return volumetypes.NewOperationContext(nil, nil, false)

View File

@ -121,10 +121,10 @@ type OperationGenerator interface {
GenerateUnmountVolumeFunc(volumeToUnmount MountedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, podsDir string) (volumetypes.GeneratedOperations, error)
// Generates the AttachVolume function needed to perform attach of a volume plugin
GenerateAttachVolumeFunc(volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations
GenerateAttachVolumeFunc(logger klog.Logger, volumeToAttach VolumeToAttach, actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations
// Generates the DetachVolume function needed to perform the detach of a volume plugin
GenerateDetachVolumeFunc(volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
GenerateDetachVolumeFunc(logger klog.Logger, volumeToDetach AttachedVolume, verifySafeToDetach bool, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
// Generates the VolumesAreAttached function needed to verify if volume plugins are attached
GenerateVolumesAreAttachedFunc(attachedVolumes []AttachedVolume, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
@ -133,7 +133,7 @@ type OperationGenerator interface {
GenerateUnmountDeviceFunc(deviceToDetach AttachedVolume, actualStateOfWorld ActualStateOfWorldMounterUpdater, mounter hostutil.HostUtils) (volumetypes.GeneratedOperations, error)
// Generates the function needed to check if the attach_detach controller has attached the volume plugin
GenerateVerifyControllerAttachedVolumeFunc(volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
GenerateVerifyControllerAttachedVolumeFunc(logger klog.Logger, volumeToMount VolumeToMount, nodeName types.NodeName, actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error)
// Generates the MapVolume function needed to perform the map of a volume plugin
GenerateMapVolumeFunc(waitForAttachTimeout time.Duration, volumeToMount VolumeToMount, actualStateOfWorldMounterUpdater ActualStateOfWorldMounterUpdater) (volumetypes.GeneratedOperations, error)
@ -348,6 +348,7 @@ func (og *operationGenerator) GenerateBulkVolumeVerifyFunc(
}
func (og *operationGenerator) GenerateAttachVolumeFunc(
logger klog.Logger,
volumeToAttach VolumeToAttach,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) volumetypes.GeneratedOperations {
@ -378,6 +379,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
uncertainNode = derr.CurrentNode
}
addErr := actualStateOfWorld.MarkVolumeAsUncertain(
logger,
volumeToAttach.VolumeName,
volumeToAttach.VolumeSpec,
uncertainNode)
@ -399,7 +401,7 @@ func (og *operationGenerator) GenerateAttachVolumeFunc(
// Update actual state of world
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath)
logger, v1.UniqueVolumeName(""), volumeToAttach.VolumeSpec, volumeToAttach.NodeName, devicePath)
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToAttach.GenerateError("AttachVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
@ -447,6 +449,7 @@ func (og *operationGenerator) GetCSITranslator() InTreeToCSITranslator {
}
func (og *operationGenerator) GenerateDetachVolumeFunc(
logger klog.Logger,
volumeToDetach AttachedVolume,
verifySafeToDetach bool,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
@ -505,7 +508,7 @@ func (og *operationGenerator) GenerateDetachVolumeFunc(
if err != nil {
// On failure, add volume back to ReportAsAttached list
actualStateOfWorld.AddVolumeToReportAsAttached(
volumeToDetach.VolumeName, volumeToDetach.NodeName)
logger, volumeToDetach.VolumeName, volumeToDetach.NodeName)
eventErr, detailedErr := volumeToDetach.GenerateError("DetachVolume.Detach failed", err)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
@ -1501,6 +1504,7 @@ func (og *operationGenerator) GenerateUnmapDeviceFunc(
}
func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
logger klog.Logger,
volumeToMount VolumeToMount,
nodeName types.NodeName,
actualStateOfWorld ActualStateOfWorldAttacherUpdater) (volumetypes.GeneratedOperations, error) {
@ -1548,13 +1552,13 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
// updated accordingly.
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */)
logger, volumeToMount.VolumeName, volumeToMount.VolumeSpec, nodeName, "" /* devicePath */)
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttachedByUniqueVolumeName failed", addVolumeNodeErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
actualStateOfWorld.InitializeClaimSize(volumeToMount.VolumeName, claimSize)
actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
@ -1588,14 +1592,14 @@ func (og *operationGenerator) GenerateVerifyControllerAttachedVolumeFunc(
for _, attachedVolume := range node.Status.VolumesAttached {
if attachedVolume.Name == volumeToMount.VolumeName {
addVolumeNodeErr := actualStateOfWorld.MarkVolumeAsAttached(
v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
logger, v1.UniqueVolumeName(""), volumeToMount.VolumeSpec, nodeName, attachedVolume.DevicePath)
klog.InfoS(volumeToMount.GenerateMsgDetailed("Controller attach succeeded", fmt.Sprintf("device path: %q", attachedVolume.DevicePath)), "pod", klog.KObj(volumeToMount.Pod))
if addVolumeNodeErr != nil {
// On failure, return error. Caller will log and retry.
eventErr, detailedErr := volumeToMount.GenerateError("VerifyControllerAttachedVolume.MarkVolumeAsAttached failed", addVolumeNodeErr)
return volumetypes.NewOperationContext(eventErr, detailedErr, migrated)
}
actualStateOfWorld.InitializeClaimSize(volumeToMount.VolumeName, claimSize)
actualStateOfWorld.InitializeClaimSize(logger, volumeToMount.VolumeName, claimSize)
return volumetypes.NewOperationContext(nil, nil, migrated)
}
}
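
Editor's note: the generator methods take the logger eagerly because the returned operation function typically runs later, on another goroutine, where there is no context left to recover a logger from — it must be captured by the closure at generation time. A reduced sketch of that capture; GeneratedOperation is a simplified stand-in for volumetypes.GeneratedOperations:

package example

import "k8s.io/klog/v2"

type GeneratedOperation struct {
	OperationFunc func() error
}

// generateAttachFunc captures logger and volumeName at generation time;
// by the time the closure runs, only what it closed over is available.
func generateAttachFunc(logger klog.Logger, volumeName string) GeneratedOperation {
	return GeneratedOperation{
		OperationFunc: func() error {
			logger.V(5).Info("Attaching volume", "volumeName", volumeName)
			// ... perform the attach and update the actual state of world ...
			return nil
		},
	}
}
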

View File

@ -324,7 +324,7 @@ type vsphereVolumeDeleter struct {
var _ volume.Deleter = &vsphereVolumeDeleter{}
func (plugin *vsphereVolumePlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
func (plugin *vsphereVolumePlugin) NewDeleter(logger klog.Logger, spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &VsphereDiskUtil{})
}
@@ -353,7 +353,7 @@ type vsphereVolumeProvisioner struct {
var _ volume.Provisioner = &vsphereVolumeProvisioner{}
func (plugin *vsphereVolumePlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
func (plugin *vsphereVolumePlugin) NewProvisioner(logger klog.Logger, options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &VsphereDiskUtil{})
}
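The vsphere hunks change only the NewDeleter and NewProvisioner signatures; the plugin itself still ignores the logger internally. A hedged sketch of how a plugin satisfies the widened constructor signature (fakePlugin and deleter are made-up names, not the real volume plugin interfaces):

package example

import "k8s.io/klog/v2"

type deleter interface{ Delete() error }

type fakePlugin struct{}

// NewDeleter accepts the logger to match the widened plugin interface; a
// plugin is free to ignore it, as the vsphere plugin does above, or to log
// construction-time details with the caller's context.
func (p *fakePlugin) NewDeleter(logger klog.Logger, volumeName string) (deleter, error) {
	logger.V(4).Info("Constructing deleter", "volume", volumeName)
	return nil, nil
}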

View File

@@ -31,6 +31,7 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
fakecloud "k8s.io/cloud-provider/fake"
"k8s.io/klog/v2/ktesting"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach"
volumecache "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -161,7 +162,6 @@ func TestPodDeletionWithDswp(t *testing.T) {
defer framework.DeleteNamespaceOrDie(testClient, ns, t)
pod := fakePodWithVol(namespaceName)
podStopCh := make(chan struct{})
if _, err := testClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to created node : %v", err)
@@ -183,7 +183,7 @@ func TestPodDeletionWithDswp(t *testing.T) {
go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
initCSIObjects(ctx.Done(), informers)
go ctrl.Run(ctx.Done())
go ctrl.Run(ctx)
// Run pvCtrl to avoid leaking goroutines started during its creation.
go pvCtrl.Run(ctx)
@@ -201,7 +201,6 @@ func TestPodDeletionWithDswp(t *testing.T) {
waitForPodsInDSWP(t, ctrl.GetDesiredStateOfWorld())
// let's stop pod events from getting triggered
close(podStopCh)
err = podInformer.GetStore().Delete(podInformerObj)
if err != nil {
t.Fatalf("Error deleting pod : %v", err)
@@ -262,7 +261,7 @@ func TestPodUpdateWithWithADC(t *testing.T) {
go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
initCSIObjects(ctx.Done(), informers)
go ctrl.Run(ctx.Done())
go ctrl.Run(ctx)
// Run pvCtrl to avoid leaking goroutines started during its creation.
go pvCtrl.Run(ctx)
@@ -335,7 +334,7 @@ func TestPodUpdateWithKeepTerminatedPodVolumes(t *testing.T) {
go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
initCSIObjects(ctx.Done(), informers)
go ctrl.Run(ctx.Done())
go ctrl.Run(ctx)
// Run pvCtrl to avoid leaking goroutines started during its creation.
go pvCtrl.Run(ctx)
@@ -426,7 +425,9 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync
plugins := []volume.VolumePlugin{plugin}
cloud := &fakecloud.Cloud{}
informers := clientgoinformers.NewSharedInformerFactory(testClient, resyncPeriod)
logger, ctx := ktesting.NewTestContext(t)
ctrl, err := attachdetach.NewAttachDetachController(
logger,
testClient,
informers.Core().V1().Pods(),
informers.Core().V1().Nodes(),
@@ -463,7 +464,7 @@ func createAdClients(t *testing.T, server *kubeapiservertesting.TestServer, sync
NodeInformer: informers.Core().V1().Nodes(),
EnableDynamicProvisioning: false,
}
pvCtrl, err := persistentvolume.NewController(params)
pvCtrl, err := persistentvolume.NewController(ctx, params)
if err != nil {
t.Fatalf("Failed to create PV controller: %v", err)
}
@@ -509,14 +510,15 @@ func TestPodAddedByDswp(t *testing.T) {
go podInformer.Run(podStopCh)
// start controller loop
ctx, cancel := context.WithCancel(context.Background())
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go informers.Core().V1().PersistentVolumeClaims().Informer().Run(ctx.Done())
go informers.Core().V1().PersistentVolumes().Informer().Run(ctx.Done())
go informers.Storage().V1().VolumeAttachments().Informer().Run(ctx.Done())
initCSIObjects(ctx.Done(), informers)
go ctrl.Run(ctx.Done())
go ctrl.Run(ctx)
// Run pvCtrl to avoid leaking goroutines started during its creation.
go pvCtrl.Run(ctx)
@@ -605,7 +607,7 @@ func TestPVCBoundWithADC(t *testing.T) {
informers.Start(ctx.Done())
informers.WaitForCacheSync(ctx.Done())
initCSIObjects(ctx.Done(), informers)
go ctrl.Run(ctx.Done())
go ctrl.Run(ctx)
go pvCtrl.Run(ctx)
waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4)
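Throughout these tests the commit swaps ctrl.Run(ctx.Done()) for ctrl.Run(ctx) and builds the context with ktesting, so log output lands in t.Log and cancellation still works. A minimal sketch of that wiring, assuming any controller with a Run(ctx) method (runner and startForTest are hypothetical names):

package example

import (
	"context"
	"testing"

	"k8s.io/klog/v2/ktesting"
)

// runner stands in for the controllers above; anything that switched from
// Run(stopCh) to Run(ctx) fits.
type runner interface{ Run(ctx context.Context) }

func startForTest(t *testing.T, ctrl runner) context.CancelFunc {
	// NewTestContext routes log output through t.Log and embeds the logger
	// in the returned context, so klog.FromContext works inside the controller.
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	go ctrl.Run(ctx)
	return cancel
}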

View File

@@ -47,6 +47,7 @@ import (
"k8s.io/kubernetes/test/integration/framework"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
)
// Several tests in this file are configurable by environment variables:
@@ -1357,7 +1358,9 @@ func createClients(namespaceName string, t *testing.T, s *kubeapiservertesting.T
plugins := []volume.VolumePlugin{plugin}
cloud := &fakecloud.Cloud{}
informers := informers.NewSharedInformerFactory(testClient, getSyncPeriod(syncPeriod))
_, ctx := ktesting.NewTestContext(t)
ctrl, err := persistentvolumecontroller.NewController(
ctx,
persistentvolumecontroller.ControllerParameters{
KubeClient: binderClient,
SyncPeriod: getSyncPeriod(syncPeriod),

View File

@@ -28,6 +28,7 @@ import (
"time"
"k8s.io/klog/v2"
"k8s.io/klog/v2/ktesting"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
@@ -1128,8 +1129,8 @@ func initPVController(t *testing.T, testCtx *testutil.TestContext, provisionDela
NodeInformer: informerFactory.Core().V1().Nodes(),
EnableDynamicProvisioning: true,
}
ctrl, err := persistentvolume.NewController(params)
_, ctx := ktesting.NewTestContext(t)
ctrl, err := persistentvolume.NewController(ctx, params)
if err != nil {
return nil, nil, err
}
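As in the other call sites, persistentvolume.NewController now takes the context first so the controller can derive its logger from it. A minimal sketch of a constructor written in that style (newController and the controller struct are illustrative, not the real persistentvolume types):

package example

import (
	"context"

	"k8s.io/klog/v2"
)

type controller struct {
	logger klog.Logger
}

// newController derives a named logger from the incoming context once, so
// both construction-time and run-time log entries carry the controller name.
func newController(ctx context.Context, name string) (*controller, error) {
	logger := klog.LoggerWithName(klog.FromContext(ctx), name)
	logger.V(2).Info("Constructing controller")
	return &controller{logger: logger}, nil
}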