Merge pull request #122082 from carlory/remove-keep-terminated-pod-volumes
The deprecated --keep-terminated-pod-volumes flag on the kubelet is removed: the flag is dropped from the NewMainKubelet signature and the Kubelet struct, and SyncTerminatedPod now always waits for a terminated pod's volumes to be unmounted and cleaned up.
@@ -360,7 +360,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     maxPerPodContainerCount int32,
     maxContainerCount int32,
     registerSchedulable bool,
-    keepTerminatedPodVolumes bool,
     nodeLabels map[string]string,
     nodeStatusMaxImages int32,
     seccompDefault bool,
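Dropping the parameter changes NewMainKubelet's signature, so every call site loses the corresponding argument, and the struct field and the branch it guarded go with it in the hunks below. The shape of the refactor, reduced to a self-contained toy (all names here are invented for illustration and are not kubelet APIs):

package main

import "fmt"

// podWorker mirrors the refactor's shape: the keep-volumes flag that used to
// be accepted by the constructor and stored on the struct is simply gone.
type podWorker struct {
	mounts []string
}

// newPodWorker no longer takes a keep-volumes bool; with nothing left to
// gate, the parameter is dropped from the signature entirely.
func newPodWorker(mounts []string) *podWorker {
	return &podWorker{mounts: mounts}
}

// cleanupVolumes now runs unconditionally on termination, where it was
// previously skipped when the flag was set.
func (w *podWorker) cleanupVolumes() {
	for _, m := range w.mounts {
		fmt.Println("unmounting", m)
	}
	w.mounts = nil
}

func main() {
	w := newPodWorker([]string{"/var/lib/kubelet/pods/<uid>/volumes/v0"})
	w.cleanupVolumes()
}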
@@ -561,7 +560,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     clock: clock.RealClock{},
     enableControllerAttachDetach: kubeCfg.EnableControllerAttachDetach,
     makeIPTablesUtilChains: kubeCfg.MakeIPTablesUtilChains,
-    keepTerminatedPodVolumes: keepTerminatedPodVolumes,
     nodeStatusMaxImages: nodeStatusMaxImages,
     tracer: tracer,
     nodeStartupLatencyTracker: kubeDeps.NodeStartupLatencyTracker,
@@ -849,7 +847,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
     kubeDeps.HostUtil,
     klet.getPodsDir(),
     kubeDeps.Recorder,
-    keepTerminatedPodVolumes,
     volumepathhandler.NewBlockVolumePathHandler())

     klet.backOff = flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
@@ -1304,10 +1301,6 @@ type Kubelet struct {
     // StatsProvider provides the node and the container stats.
     StatsProvider *stats.Provider

-    // This flag, if set, instructs the kubelet to keep volumes from terminated pods mounted to the node.
-    // This can be useful for debugging volume related issues.
-    keepTerminatedPodVolumes bool // DEPRECATED
-
     // pluginmanager runs a set of asynchronous loops that figure out which
     // plugins need to be registered/unregistered based on this node and makes it so.
     pluginManager pluginmanager.PluginManager
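The removed comment notes the flag existed only to keep volumes mounted for debugging. With no in-kubelet switch left, leftover mounts can still be inspected directly on the node; a rough sketch, assuming the default --root-dir layout of /var/lib/kubelet/pods/<pod-UID>/volumes (the path is configurable, and this helper is not a kubelet API):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// List per-pod volume plugin directories under the kubelet's pods dir.
// The default path below is an assumption; it follows --root-dir.
func main() {
	podsDir := "/var/lib/kubelet/pods"
	pods, err := os.ReadDir(podsDir)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, p := range pods {
		volDir := filepath.Join(podsDir, p.Name(), "volumes")
		if entries, err := os.ReadDir(volDir); err == nil && len(entries) > 0 {
			fmt.Printf("pod %s still has %d volume plugin dirs\n", p.Name(), len(entries))
		}
	}
}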
@@ -2167,20 +2160,18 @@ func (kl *Kubelet) SyncTerminatedPod(ctx context.Context, pod *v1.Pod, podStatus
     }
     klog.V(4).InfoS("Pod termination unmounted volumes", "pod", klog.KObj(pod), "podUID", pod.UID)

-    if !kl.keepTerminatedPodVolumes {
-        // This waiting loop relies on the background cleanup which starts after pod workers respond
-        // true for ShouldPodRuntimeBeRemoved, which happens after `SyncTerminatingPod` is completed.
-        if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
-            volumesExist := kl.podVolumesExist(pod.UID)
-            if volumesExist {
-                klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod), "podUID", pod.UID)
-            }
-            return !volumesExist, nil
-        }); err != nil {
-            return err
-        }
-        klog.V(3).InfoS("Pod termination cleaned up volume paths", "pod", klog.KObj(pod), "podUID", pod.UID)
-    }
+    // This waiting loop relies on the background cleanup which starts after pod workers respond
+    // true for ShouldPodRuntimeBeRemoved, which happens after `SyncTerminatingPod` is completed.
+    if err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true, func(ctx context.Context) (bool, error) {
+        volumesExist := kl.podVolumesExist(pod.UID)
+        if volumesExist {
+            klog.V(3).InfoS("Pod is terminated, but some volumes have not been cleaned up", "pod", klog.KObj(pod), "podUID", pod.UID)
+        }
+        return !volumesExist, nil
+    }); err != nil {
+        return err
+    }
+    klog.V(3).InfoS("Pod termination cleaned up volume paths", "pod", klog.KObj(pod), "podUID", pod.UID)

     // After volume unmount is complete, let the secret and configmap managers know we're done with this pod
     if kl.secretManager != nil {
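The surviving code path always waits for volume cleanup via apimachinery's wait.PollUntilContextCancel, which re-runs a condition function at a fixed interval until it reports done, returns an error, or the context ends. A self-contained sketch of the same pattern (the volumesRemaining counter below stands in for kl.podVolumesExist and is purely illustrative):

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Bound the wait so a stuck cleanup cannot block forever.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	volumesRemaining := 3 // stand-in for volumes still mounted on the node
	err := wait.PollUntilContextCancel(ctx, 100*time.Millisecond, true,
		func(ctx context.Context) (bool, error) {
			if volumesRemaining > 0 {
				fmt.Println("volumes not cleaned up yet:", volumesRemaining)
				volumesRemaining-- // background cleanup makes progress
				return false, nil  // not done; poll again
			}
			return true, nil // all volumes gone; stop polling
		})
	if err != nil {
		fmt.Println("gave up waiting:", err)
		return
	}
	fmt.Println("volume paths cleaned up")
}

Passing true as the third argument makes the condition run once before the first 100ms sleep, matching the call in SyncTerminatedPod above.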