diff --git a/pkg/client/tests/portfoward_test.go b/pkg/client/tests/portfoward_test.go index caf953e2605..79f84a4d7fc 100644 --- a/pkg/client/tests/portfoward_test.go +++ b/pkg/client/tests/portfoward_test.go @@ -18,7 +18,6 @@ package tests import ( "bytes" - "context" "fmt" "io" "net" @@ -52,7 +51,7 @@ type fakePortForwarder struct { var _ portforward.PortForwarder = &fakePortForwarder{} -func (pf *fakePortForwarder) PortForward(_ context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error { +func (pf *fakePortForwarder) PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error { defer stream.Close() // read from the client diff --git a/pkg/client/tests/remotecommand_test.go b/pkg/client/tests/remotecommand_test.go index cad7dae0fd5..910cfce38f8 100644 --- a/pkg/client/tests/remotecommand_test.go +++ b/pkg/client/tests/remotecommand_test.go @@ -18,7 +18,6 @@ package tests import ( "bytes" - "context" "errors" "fmt" "io" @@ -59,11 +58,11 @@ type fakeExecutor struct { exec bool } -func (ex *fakeExecutor) ExecInContainer(_ context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize, timeout time.Duration) error { +func (ex *fakeExecutor) ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize, timeout time.Duration) error { return ex.run(name, uid, container, cmd, in, out, err, tty) } -func (ex *fakeExecutor) AttachContainer(_ context.Context, name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize) error { +func (ex *fakeExecutor) AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remoteclient.TerminalSize) error { return ex.run(name, uid, container, nil, in, out, err, tty) } diff --git a/pkg/kubelet/cm/container_manager_linux.go b/pkg/kubelet/cm/container_manager_linux.go index ec46af60bde..c7fcbaff8b0 100644 --- a/pkg/kubelet/cm/container_manager_linux.go +++ b/pkg/kubelet/cm/container_manager_linux.go @@ -21,7 +21,6 @@ package cm import ( "bytes" - "context" "fmt" "os" "path" @@ -558,11 +557,10 @@ func (cm *containerManagerImpl) Start(node *v1.Node, podStatusProvider status.PodStatusProvider, runtimeService internalapi.RuntimeService, localStorageCapacityIsolation bool) error { - ctx := context.Background() // Initialize CPU manager if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManager) { - containerMap := buildContainerMapFromRuntime(ctx, runtimeService) + containerMap := buildContainerMapFromRuntime(runtimeService) err := cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap) if err != nil { return fmt.Errorf("start cpu manager error: %v", err) @@ -571,7 +569,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node, // Initialize memory manager if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryManager) { - containerMap := buildContainerMapFromRuntime(ctx, runtimeService) + containerMap := buildContainerMapFromRuntime(runtimeService) err := cm.memoryManager.Start(memorymanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap) if err != nil { return fmt.Errorf("start memory manager error: %v", err) @@ -729,15 +727,15 @@ func (cm 
*containerManagerImpl) SystemCgroupsLimit() v1.ResourceList { } } -func buildContainerMapFromRuntime(ctx context.Context, runtimeService internalapi.RuntimeService) containermap.ContainerMap { +func buildContainerMapFromRuntime(runtimeService internalapi.RuntimeService) containermap.ContainerMap { podSandboxMap := make(map[string]string) - podSandboxList, _ := runtimeService.ListPodSandbox(ctx, nil) + podSandboxList, _ := runtimeService.ListPodSandbox(nil) for _, p := range podSandboxList { podSandboxMap[p.Id] = p.Metadata.Uid } containerMap := containermap.NewContainerMap() - containerList, _ := runtimeService.ListContainers(ctx, nil) + containerList, _ := runtimeService.ListContainers(nil) for _, c := range containerList { if _, exists := podSandboxMap[c.PodSandboxId]; !exists { klog.InfoS("no PodSandBox found for the container", "podSandboxId", c.PodSandboxId, "containerName", c.Metadata.Name, "containerId", c.Id) diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go index 443eecd2d36..ae8eae9d76d 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go @@ -17,7 +17,6 @@ limitations under the License. package cpumanager import ( - "context" "fmt" "math" "sync" @@ -43,7 +42,7 @@ import ( type ActivePodsFunc func() []*v1.Pod type runtimeService interface { - UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error + UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error } type policyName string @@ -402,7 +401,6 @@ func (m *manager) removeStaleState() { } func (m *manager) reconcileState() (success []reconciledContainer, failure []reconciledContainer) { - ctx := context.Background() success = []reconciledContainer{} failure = []reconciledContainer{} @@ -471,7 +469,7 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name) if !cset.Equals(lcset) { klog.V(4).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset) - err = m.updateContainerCPUSet(ctx, containerID, cset) + err = m.updateContainerCPUSet(containerID, cset) if err != nil { klog.ErrorS(err, "ReconcileState: failed to update container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset) failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID}) @@ -510,13 +508,12 @@ func findContainerStatusByName(status *v1.PodStatus, name string) (*v1.Container return nil, fmt.Errorf("unable to find status for container with name %v in pod status (it may not be running)", name) } -func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error { +func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet) error { // TODO: Consider adding a `ResourceConfigForContainer` helper in // helpers_linux.go similar to what exists for pods. // It would be better to pass the full container resources here instead of // this patch-like partial resources. 
return m.containerRuntime.UpdateContainerResources( - ctx, containerID, &runtimeapi.ContainerResources{ Linux: &runtimeapi.LinuxContainerResources{ diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go index 29941611a53..2beb7b35af2 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go +++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go @@ -17,7 +17,6 @@ limitations under the License. package cpumanager import ( - "context" "fmt" "os" "reflect" @@ -128,7 +127,7 @@ type mockRuntimeService struct { err error } -func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error { +func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error { return rt.err } diff --git a/pkg/kubelet/cm/memorymanager/memory_manager.go b/pkg/kubelet/cm/memorymanager/memory_manager.go index b8c55b74597..c567871f1b0 100644 --- a/pkg/kubelet/cm/memorymanager/memory_manager.go +++ b/pkg/kubelet/cm/memorymanager/memory_manager.go @@ -17,7 +17,6 @@ limitations under the License. package memorymanager import ( - "context" "fmt" "sync" @@ -44,7 +43,7 @@ const memoryManagerStateFileName = "memory_manager_state" type ActivePodsFunc func() []*v1.Pod type runtimeService interface { - UpdateContainerResources(ctx context.Context, id string, resources *runtimeapi.ContainerResources) error + UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error } type sourcesReadyStub struct{} diff --git a/pkg/kubelet/cm/memorymanager/memory_manager_test.go b/pkg/kubelet/cm/memorymanager/memory_manager_test.go index dc67c2a47ba..7376588f349 100644 --- a/pkg/kubelet/cm/memorymanager/memory_manager_test.go +++ b/pkg/kubelet/cm/memorymanager/memory_manager_test.go @@ -17,7 +17,6 @@ limitations under the License. package memorymanager import ( - "context" "fmt" "os" "reflect" @@ -122,7 +121,7 @@ type mockRuntimeService struct { err error } -func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error { +func (rt mockRuntimeService) UpdateContainerResources(id string, resources *runtimeapi.ContainerResources) error { return rt.err } diff --git a/pkg/kubelet/container/container_gc.go b/pkg/kubelet/container/container_gc.go index b0a25d50058..c1df294a963 100644 --- a/pkg/kubelet/container/container_gc.go +++ b/pkg/kubelet/container/container_gc.go @@ -17,7 +17,6 @@ limitations under the License. package container import ( - "context" "fmt" "time" @@ -42,9 +41,9 @@ type GCPolicy struct { // Implementation is thread-compatible. type GC interface { // Garbage collect containers. 
- GarbageCollect(ctx context.Context) error + GarbageCollect() error // Deletes all unused containers, including containers belonging to pods that are terminated but not deleted - DeleteAllUnusedContainers(ctx context.Context) error + DeleteAllUnusedContainers() error } // SourcesReadyProvider knows how to determine if configuration sources are ready @@ -78,11 +77,11 @@ func NewContainerGC(runtime Runtime, policy GCPolicy, sourcesReadyProvider Sourc }, nil } -func (cgc *realContainerGC) GarbageCollect(ctx context.Context) error { - return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), false) +func (cgc *realContainerGC) GarbageCollect() error { + return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), false) } -func (cgc *realContainerGC) DeleteAllUnusedContainers(ctx context.Context) error { +func (cgc *realContainerGC) DeleteAllUnusedContainers() error { klog.InfoS("Attempting to delete unused containers") - return cgc.runtime.GarbageCollect(ctx, cgc.policy, cgc.sourcesReadyProvider.AllReady(), true) + return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true) } diff --git a/pkg/kubelet/container/helpers.go b/pkg/kubelet/container/helpers.go index 0de9d034e57..d11f2d67886 100644 --- a/pkg/kubelet/container/helpers.go +++ b/pkg/kubelet/container/helpers.go @@ -17,7 +17,6 @@ limitations under the License. package container import ( - "context" "encoding/json" "fmt" "hash/fnv" @@ -40,13 +39,13 @@ import ( // HandlerRunner runs a lifecycle handler for a container. type HandlerRunner interface { - Run(ctx context.Context, containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) + Run(containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) } // RuntimeHelper wraps kubelet to make container runtime // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { - GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error) + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (contOpts *RunContainerOptions, cleanupAction func(), err error) GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) // GetPodCgroupParent returns the CgroupName identifier, and its literal cgroupfs form on the host // of a pod. diff --git a/pkg/kubelet/container/runtime.go b/pkg/kubelet/container/runtime.go index 4fee4ba6eaf..6f7b76783ab 100644 --- a/pkg/kubelet/container/runtime.go +++ b/pkg/kubelet/container/runtime.go @@ -70,7 +70,7 @@ type Runtime interface { Type() string // Version returns the version information of the container runtime. - Version(ctx context.Context) (Version, error) + Version() (Version, error) // APIVersion returns the cached API version information of the container // runtime. Implementation is expected to update this cache periodically. @@ -79,11 +79,11 @@ type Runtime interface { APIVersion() (Version, error) // Status returns the status of the runtime. An error is returned if the Status // function itself fails, nil otherwise. - Status(ctx context.Context) (*RuntimeStatus, error) + Status() (*RuntimeStatus, error) // GetPods returns a list of containers grouped by pods. 
The boolean parameter // specifies whether the runtime returns all containers including those already // exited and dead containers (used for garbage collection). - GetPods(ctx context.Context, all bool) ([]*Pod, error) + GetPods(all bool) ([]*Pod, error) // GarbageCollect removes dead containers using the specified container gc policy // If allSourcesReady is not true, it means that kubelet doesn't have the // complete list of pods from all available sources (e.g., apiserver, http, @@ -93,18 +93,18 @@ type Runtime interface { // that are terminated, but not deleted will be evicted. Otherwise, only deleted pods // will be GC'd. // TODO: Revisit this method and make it cleaner. - GarbageCollect(ctx context.Context, gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error + GarbageCollect(gcPolicy GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error // SyncPod syncs the running pod into the desired pod. - SyncPod(ctx context.Context, pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult + SyncPod(pod *v1.Pod, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult // KillPod kills all the containers of a pod. Pod may be nil, running pod must not be. // TODO(random-liu): Return PodSyncResult in KillPod. // gracePeriodOverride if specified allows the caller to override the pod default grace period. // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. - KillPod(ctx context.Context, pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error + KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error // GetPodStatus retrieves the status of the pod, including the // information of all containers in the pod that are visible in Runtime. - GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*PodStatus, error) + GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error) // TODO(vmarmol): Unify pod and containerID args. // GetContainerLogs returns logs of a specific container. By // default, it returns a snapshot of the container log. Set 'follow' to true to @@ -112,53 +112,53 @@ type Runtime interface { // "100" or "all") to tail the log. GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) // DeleteContainer deletes a container. If the container is still running, an error is returned. - DeleteContainer(ctx context.Context, containerID ContainerID) error + DeleteContainer(containerID ContainerID) error // ImageService provides methods to image-related methods. ImageService // UpdatePodCIDR sends a new podCIDR to the runtime. // This method just proxies a new runtimeConfig with the updated // CIDR value down to the runtime shim. - UpdatePodCIDR(ctx context.Context, podCIDR string) error + UpdatePodCIDR(podCIDR string) error // CheckpointContainer tells the runtime to checkpoint a container // and store the resulting archive to the checkpoint directory. - CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error + CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error } // StreamingRuntime is the interface implemented by runtimes that handle the serving of the // streaming calls (exec/attach/port-forward) themselves. 
In this case, Kubelet should redirect to // the runtime server. type StreamingRuntime interface { - GetExec(ctx context.Context, id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) - GetAttach(ctx context.Context, id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) - GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) + GetExec(id ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) + GetAttach(id ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) + GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) } // ImageService interfaces allows to work with image service. type ImageService interface { // PullImage pulls an image from the network to local storage using the supplied // secrets if necessary. It returns a reference (digest or ID) to the pulled image. - PullImage(ctx context.Context, image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + PullImage(image ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) // GetImageRef gets the reference (digest or ID) of the image which has already been in // the local storage. It returns ("", nil) if the image isn't in the local storage. - GetImageRef(ctx context.Context, image ImageSpec) (string, error) + GetImageRef(image ImageSpec) (string, error) // ListImages gets all images currently on the machine. - ListImages(ctx context.Context) ([]Image, error) + ListImages() ([]Image, error) // RemoveImage removes the specified image. - RemoveImage(ctx context.Context, image ImageSpec) error + RemoveImage(image ImageSpec) error // ImageStats returns Image statistics. - ImageStats(ctx context.Context) (*ImageStats, error) + ImageStats() (*ImageStats, error) } // Attacher interface allows to attach a container. type Attacher interface { - AttachContainer(ctx context.Context, id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error) + AttachContainer(id ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) (err error) } // CommandRunner interface allows to run command in a container. type CommandRunner interface { // RunInContainer synchronously executes the command in the container, and returns the output. // If the command completes with a non-0 exit code, a k8s.io/utils/exec.ExitError will be returned. - RunInContainer(ctx context.Context, id ContainerID, cmd []string, timeout time.Duration) ([]byte, error) + RunInContainer(id ContainerID, cmd []string, timeout time.Duration) ([]byte, error) } // Pod is a group of containers. diff --git a/pkg/kubelet/container/runtime_cache.go b/pkg/kubelet/container/runtime_cache.go index 118e10728e0..587a3521153 100644 --- a/pkg/kubelet/container/runtime_cache.go +++ b/pkg/kubelet/container/runtime_cache.go @@ -18,7 +18,6 @@ limitations under the License. package container import ( - "context" "sync" "time" ) @@ -30,12 +29,12 @@ var ( // RuntimeCache is in interface for obtaining cached Pods. 
type RuntimeCache interface { - GetPods(context.Context) ([]*Pod, error) - ForceUpdateIfOlder(context.Context, time.Time) error + GetPods() ([]*Pod, error) + ForceUpdateIfOlder(time.Time) error } type podsGetter interface { - GetPods(context.Context, bool) ([]*Pod, error) + GetPods(bool) ([]*Pod, error) } // NewRuntimeCache creates a container runtime cache. @@ -61,28 +60,28 @@ type runtimeCache struct { // GetPods returns the cached pods if they are not outdated; otherwise, it // retrieves the latest pods and return them. -func (r *runtimeCache) GetPods(ctx context.Context) ([]*Pod, error) { +func (r *runtimeCache) GetPods() ([]*Pod, error) { r.Lock() defer r.Unlock() if time.Since(r.cacheTime) > defaultCachePeriod { - if err := r.updateCache(ctx); err != nil { + if err := r.updateCache(); err != nil { return nil, err } } return r.pods, nil } -func (r *runtimeCache) ForceUpdateIfOlder(ctx context.Context, minExpectedCacheTime time.Time) error { +func (r *runtimeCache) ForceUpdateIfOlder(minExpectedCacheTime time.Time) error { r.Lock() defer r.Unlock() if r.cacheTime.Before(minExpectedCacheTime) { - return r.updateCache(ctx) + return r.updateCache() } return nil } -func (r *runtimeCache) updateCache(ctx context.Context) error { - pods, timestamp, err := r.getPodsWithTimestamp(ctx) +func (r *runtimeCache) updateCache() error { + pods, timestamp, err := r.getPodsWithTimestamp() if err != nil { return err } @@ -91,9 +90,9 @@ func (r *runtimeCache) updateCache(ctx context.Context) error { } // getPodsWithTimestamp records a timestamp and retrieves pods from the getter. -func (r *runtimeCache) getPodsWithTimestamp(ctx context.Context) ([]*Pod, time.Time, error) { +func (r *runtimeCache) getPodsWithTimestamp() ([]*Pod, time.Time, error) { // Always record the timestamp before getting the pods to avoid stale pods. timestamp := time.Now() - pods, err := r.getter.GetPods(ctx, false) + pods, err := r.getter.GetPods(false) return pods, timestamp, err } diff --git a/pkg/kubelet/container/runtime_cache_fake.go b/pkg/kubelet/container/runtime_cache_fake.go index 4a09b3be923..0c07c7edfd2 100644 --- a/pkg/kubelet/container/runtime_cache_fake.go +++ b/pkg/kubelet/container/runtime_cache_fake.go @@ -16,8 +16,6 @@ limitations under the License. package container -import "context" - // TestRuntimeCache embeds runtimeCache with some additional methods for testing. // It must be declared in the container package to have visibility to runtimeCache. // It cannot be in a "..._test.go" file in order for runtime_cache_test.go to have cross-package visibility to it. @@ -30,7 +28,7 @@ type TestRuntimeCache struct { func (r *TestRuntimeCache) UpdateCacheWithLock() error { r.Lock() defer r.Unlock() - return r.updateCache(context.Background()) + return r.updateCache() } // GetCachedPods returns the cached pods. diff --git a/pkg/kubelet/container/runtime_cache_test.go b/pkg/kubelet/container/runtime_cache_test.go index 84af1d561ef..4fc4da8bf30 100644 --- a/pkg/kubelet/container/runtime_cache_test.go +++ b/pkg/kubelet/container/runtime_cache_test.go @@ -17,7 +17,6 @@ limitations under the License. 
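
For reference, a minimal caller-side sketch of the reverted, context-free RuntimeCache surface shown above, assuming the real k8s.io/kubernetes/pkg/kubelet/container package; the listCachedPodIDs helper is hypothetical and not part of this change:

package example // hypothetical package, not part of this diff

import (
	"time"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// listCachedPodIDs refreshes the cache when its snapshot is older than minAge
// and returns the IDs of the cached pods. Before this revert, both calls took
// a context.Context as their first argument.
func listCachedPodIDs(cache kubecontainer.RuntimeCache, minAge time.Time) ([]string, error) {
	if err := cache.ForceUpdateIfOlder(minAge); err != nil {
		return nil, err
	}
	pods, err := cache.GetPods()
	if err != nil {
		return nil, err
	}
	ids := make([]string, 0, len(pods))
	for _, p := range pods {
		ids = append(ids, string(p.ID))
	}
	return ids, nil
}
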
package container_test import ( - "context" "reflect" "testing" "time" @@ -38,12 +37,11 @@ func comparePods(t *testing.T, expected []*ctest.FakePod, actual []*Pod) { } func TestGetPods(t *testing.T) { - ctx := context.Background() runtime := &ctest.FakeRuntime{} expected := []*ctest.FakePod{{Pod: &Pod{ID: "1111"}}, {Pod: &Pod{ID: "2222"}}, {Pod: &Pod{ID: "3333"}}} runtime.PodList = expected cache := NewTestRuntimeCache(runtime) - actual, err := cache.GetPods(ctx) + actual, err := cache.GetPods() if err != nil { t.Errorf("unexpected error %v", err) } @@ -52,7 +50,6 @@ func TestGetPods(t *testing.T) { } func TestForceUpdateIfOlder(t *testing.T) { - ctx := context.Background() runtime := &ctest.FakeRuntime{} cache := NewTestRuntimeCache(runtime) @@ -66,12 +63,12 @@ func TestForceUpdateIfOlder(t *testing.T) { runtime.PodList = newpods // An older timestamp should not force an update. - cache.ForceUpdateIfOlder(ctx, time.Now().Add(-20*time.Minute)) + cache.ForceUpdateIfOlder(time.Now().Add(-20 * time.Minute)) actual := cache.GetCachedPods() comparePods(t, oldpods, actual) // A newer timestamp should force an update. - cache.ForceUpdateIfOlder(ctx, time.Now().Add(20*time.Second)) + cache.ForceUpdateIfOlder(time.Now().Add(20 * time.Second)) actual = cache.GetCachedPods() comparePods(t, newpods, actual) } diff --git a/pkg/kubelet/container/testing/fake_cache.go b/pkg/kubelet/container/testing/fake_cache.go index c940e4ba978..354a4a4a2f9 100644 --- a/pkg/kubelet/container/testing/fake_cache.go +++ b/pkg/kubelet/container/testing/fake_cache.go @@ -17,7 +17,6 @@ limitations under the License. package testing import ( - "context" "time" "k8s.io/apimachinery/pkg/types" @@ -33,7 +32,7 @@ func NewFakeCache(runtime container.Runtime) container.Cache { } func (c *fakeCache) Get(id types.UID) (*container.PodStatus, error) { - return c.runtime.GetPodStatus(context.Background(), id, "", "") + return c.runtime.GetPodStatus(id, "", "") } func (c *fakeCache) GetNewerThan(id types.UID, minTime time.Time) (*container.PodStatus, error) { diff --git a/pkg/kubelet/container/testing/fake_runtime.go b/pkg/kubelet/container/testing/fake_runtime.go index bafb6310730..c259a06cb13 100644 --- a/pkg/kubelet/container/testing/fake_runtime.go +++ b/pkg/kubelet/container/testing/fake_runtime.go @@ -91,7 +91,7 @@ func (fv *FakeVersion) Compare(other string) (int, error) { } type podsGetter interface { - GetPods(context.Context, bool) ([]*kubecontainer.Pod, error) + GetPods(bool) ([]*kubecontainer.Pod, error) } type FakeRuntimeCache struct { @@ -102,11 +102,11 @@ func NewFakeRuntimeCache(getter podsGetter) kubecontainer.RuntimeCache { return &FakeRuntimeCache{getter} } -func (f *FakeRuntimeCache) GetPods(ctx context.Context) ([]*kubecontainer.Pod, error) { - return f.getter.GetPods(ctx, false) +func (f *FakeRuntimeCache) GetPods() ([]*kubecontainer.Pod, error) { + return f.getter.GetPods(false) } -func (f *FakeRuntimeCache) ForceUpdateIfOlder(context.Context, time.Time) error { +func (f *FakeRuntimeCache) ForceUpdateIfOlder(time.Time) error { return nil } @@ -132,7 +132,7 @@ func (f *FakeRuntime) ClearCalls() { } // UpdatePodCIDR fulfills the cri interface. 
-func (f *FakeRuntime) UpdatePodCIDR(_ context.Context, c string) error { +func (f *FakeRuntime) UpdatePodCIDR(c string) error { return nil } @@ -179,7 +179,7 @@ func (f *FakeRuntime) Type() string { return f.RuntimeType } -func (f *FakeRuntime) Version(_ context.Context) (kubecontainer.Version, error) { +func (f *FakeRuntime) Version() (kubecontainer.Version, error) { f.Lock() defer f.Unlock() @@ -195,7 +195,7 @@ func (f *FakeRuntime) APIVersion() (kubecontainer.Version, error) { return &FakeVersion{Version: f.APIVersionInfo}, f.Err } -func (f *FakeRuntime) Status(_ context.Context) (*kubecontainer.RuntimeStatus, error) { +func (f *FakeRuntime) Status() (*kubecontainer.RuntimeStatus, error) { f.Lock() defer f.Unlock() @@ -203,7 +203,7 @@ func (f *FakeRuntime) Status(_ context.Context) (*kubecontainer.RuntimeStatus, e return f.RuntimeStatus, f.StatusErr } -func (f *FakeRuntime) GetPods(_ context.Context, all bool) ([]*kubecontainer.Pod, error) { +func (f *FakeRuntime) GetPods(all bool) ([]*kubecontainer.Pod, error) { f.Lock() defer f.Unlock() @@ -222,7 +222,7 @@ func (f *FakeRuntime) GetPods(_ context.Context, all bool) ([]*kubecontainer.Pod return pods, f.Err } -func (f *FakeRuntime) SyncPod(_ context.Context, pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { +func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ *kubecontainer.PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { f.Lock() defer f.Unlock() @@ -238,7 +238,7 @@ func (f *FakeRuntime) SyncPod(_ context.Context, pod *v1.Pod, _ *kubecontainer.P return } -func (f *FakeRuntime) KillPod(_ context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { +func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { f.Lock() defer f.Unlock() @@ -276,7 +276,7 @@ func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) er return f.Err } -func (f *FakeRuntime) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { +func (f *FakeRuntime) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { f.Lock() defer f.Unlock() @@ -293,7 +293,7 @@ func (f *FakeRuntime) GetContainerLogs(_ context.Context, pod *v1.Pod, container return f.Err } -func (f *FakeRuntime) PullImage(_ context.Context, image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (f *FakeRuntime) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { f.Lock() defer f.Unlock() @@ -308,7 +308,7 @@ func (f *FakeRuntime) PullImage(_ context.Context, image kubecontainer.ImageSpec return image.Image, f.Err } -func (f *FakeRuntime) GetImageRef(_ context.Context, image kubecontainer.ImageSpec) (string, error) { +func (f *FakeRuntime) GetImageRef(image kubecontainer.ImageSpec) (string, error) { f.Lock() defer f.Unlock() @@ -321,7 +321,7 @@ func (f *FakeRuntime) GetImageRef(_ context.Context, image kubecontainer.ImageSp return "", f.InspectErr } -func (f *FakeRuntime) ListImages(_ context.Context) ([]kubecontainer.Image, error) { +func (f *FakeRuntime) ListImages() ([]kubecontainer.Image, error) { f.Lock() defer f.Unlock() @@ -329,7 +329,7 @@ func (f *FakeRuntime) ListImages(_ context.Context) ([]kubecontainer.Image, erro return f.ImageList, f.Err } -func (f 
*FakeRuntime) RemoveImage(_ context.Context, image kubecontainer.ImageSpec) error { +func (f *FakeRuntime) RemoveImage(image kubecontainer.ImageSpec) error { f.Lock() defer f.Unlock() @@ -346,7 +346,7 @@ func (f *FakeRuntime) RemoveImage(_ context.Context, image kubecontainer.ImageSp return f.Err } -func (f *FakeRuntime) GarbageCollect(_ context.Context, gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error { +func (f *FakeRuntime) GarbageCollect(gcPolicy kubecontainer.GCPolicy, ready bool, evictNonDeletedPods bool) error { f.Lock() defer f.Unlock() @@ -354,7 +354,7 @@ func (f *FakeRuntime) GarbageCollect(_ context.Context, gcPolicy kubecontainer.G return f.Err } -func (f *FakeRuntime) DeleteContainer(_ context.Context, containerID kubecontainer.ContainerID) error { +func (f *FakeRuntime) DeleteContainer(containerID kubecontainer.ContainerID) error { f.Lock() defer f.Unlock() @@ -362,7 +362,7 @@ func (f *FakeRuntime) DeleteContainer(_ context.Context, containerID kubecontain return f.Err } -func (f *FakeRuntime) CheckpointContainer(_ context.Context, options *runtimeapi.CheckpointContainerRequest) error { +func (f *FakeRuntime) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { f.Lock() defer f.Unlock() @@ -370,7 +370,7 @@ func (f *FakeRuntime) CheckpointContainer(_ context.Context, options *runtimeapi return f.Err } -func (f *FakeRuntime) ImageStats(_ context.Context) (*kubecontainer.ImageStats, error) { +func (f *FakeRuntime) ImageStats() (*kubecontainer.ImageStats, error) { f.Lock() defer f.Unlock() @@ -378,7 +378,7 @@ func (f *FakeRuntime) ImageStats(_ context.Context) (*kubecontainer.ImageStats, return nil, f.Err } -func (f *FakeStreamingRuntime) GetExec(_ context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (f *FakeStreamingRuntime) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { f.Lock() defer f.Unlock() @@ -386,7 +386,7 @@ func (f *FakeStreamingRuntime) GetExec(_ context.Context, id kubecontainer.Conta return &url.URL{Host: FakeHost}, f.Err } -func (f *FakeStreamingRuntime) GetAttach(_ context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (f *FakeStreamingRuntime) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { f.Lock() defer f.Unlock() @@ -394,7 +394,7 @@ func (f *FakeStreamingRuntime) GetAttach(_ context.Context, id kubecontainer.Con return &url.URL{Host: FakeHost}, f.Err } -func (f *FakeStreamingRuntime) GetPortForward(_ context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { +func (f *FakeStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { f.Lock() defer f.Unlock() @@ -414,7 +414,7 @@ type FakeContainerCommandRunner struct { var _ kubecontainer.CommandRunner = &FakeContainerCommandRunner{} -func (f *FakeContainerCommandRunner) RunInContainer(_ context.Context, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { +func (f *FakeContainerCommandRunner) RunInContainer(containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { // record invoked values f.ContainerID = containerID f.Cmd = cmd diff --git a/pkg/kubelet/container/testing/fake_runtime_helper.go b/pkg/kubelet/container/testing/fake_runtime_helper.go index 
02e06d174e8..eb4a51676bb 100644 --- a/pkg/kubelet/container/testing/fake_runtime_helper.go +++ b/pkg/kubelet/container/testing/fake_runtime_helper.go @@ -17,9 +17,7 @@ limitations under the License. package testing import ( - "context" - - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" kubetypes "k8s.io/apimachinery/pkg/types" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -36,7 +34,7 @@ type FakeRuntimeHelper struct { Err error } -func (f *FakeRuntimeHelper) GenerateRunContainerOptions(_ context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { +func (f *FakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { var opts kubecontainer.RunContainerOptions if len(container.TerminationMessagePath) != 0 { opts.PodContainerDir = f.PodContainerDir diff --git a/pkg/kubelet/container/testing/mock_runtime_cache.go b/pkg/kubelet/container/testing/mock_runtime_cache.go index 58d8be7d977..2c77f358332 100644 --- a/pkg/kubelet/container/testing/mock_runtime_cache.go +++ b/pkg/kubelet/container/testing/mock_runtime_cache.go @@ -21,7 +21,6 @@ limitations under the License. package testing import ( - context "context" reflect "reflect" time "time" @@ -53,32 +52,32 @@ func (m *MockRuntimeCache) EXPECT() *MockRuntimeCacheMockRecorder { } // ForceUpdateIfOlder mocks base method. -func (m *MockRuntimeCache) ForceUpdateIfOlder(arg0 context.Context, arg1 time.Time) error { +func (m *MockRuntimeCache) ForceUpdateIfOlder(arg0 time.Time) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ForceUpdateIfOlder", arg0, arg1) + ret := m.ctrl.Call(m, "ForceUpdateIfOlder", arg0) ret0, _ := ret[0].(error) return ret0 } // ForceUpdateIfOlder indicates an expected call of ForceUpdateIfOlder. -func (mr *MockRuntimeCacheMockRecorder) ForceUpdateIfOlder(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockRuntimeCacheMockRecorder) ForceUpdateIfOlder(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUpdateIfOlder", reflect.TypeOf((*MockRuntimeCache)(nil).ForceUpdateIfOlder), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ForceUpdateIfOlder", reflect.TypeOf((*MockRuntimeCache)(nil).ForceUpdateIfOlder), arg0) } // GetPods mocks base method. -func (m *MockRuntimeCache) GetPods(arg0 context.Context) ([]*container.Pod, error) { +func (m *MockRuntimeCache) GetPods() ([]*container.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPods", arg0) + ret := m.ctrl.Call(m, "GetPods") ret0, _ := ret[0].([]*container.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPods indicates an expected call of GetPods. -func (mr *MockRuntimeCacheMockRecorder) GetPods(arg0 interface{}) *gomock.Call { +func (mr *MockRuntimeCacheMockRecorder) GetPods() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntimeCache)(nil).GetPods), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntimeCache)(nil).GetPods)) } // MockpodsGetter is a mock of podsGetter interface. @@ -105,16 +104,16 @@ func (m *MockpodsGetter) EXPECT() *MockpodsGetterMockRecorder { } // GetPods mocks base method. 
-func (m *MockpodsGetter) GetPods(arg0 context.Context, arg1 bool) ([]*container.Pod, error) { +func (m *MockpodsGetter) GetPods(arg0 bool) ([]*container.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPods", arg0, arg1) + ret := m.ctrl.Call(m, "GetPods", arg0) ret0, _ := ret[0].([]*container.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPods indicates an expected call of GetPods. -func (mr *MockpodsGetterMockRecorder) GetPods(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockpodsGetterMockRecorder) GetPods(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockpodsGetter)(nil).GetPods), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockpodsGetter)(nil).GetPods), arg0) } diff --git a/pkg/kubelet/container/testing/runtime_mock.go b/pkg/kubelet/container/testing/runtime_mock.go index bf2701b8554..1e68ee3f5f8 100644 --- a/pkg/kubelet/container/testing/runtime_mock.go +++ b/pkg/kubelet/container/testing/runtime_mock.go @@ -127,45 +127,45 @@ func (mr *MockRuntimeMockRecorder) APIVersion() *gomock.Call { } // CheckpointContainer mocks base method. -func (m *MockRuntime) CheckpointContainer(ctx context.Context, options *v10.CheckpointContainerRequest) error { +func (m *MockRuntime) CheckpointContainer(options *v10.CheckpointContainerRequest) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CheckpointContainer", ctx, options) + ret := m.ctrl.Call(m, "CheckpointContainer", options) ret0, _ := ret[0].(error) return ret0 } // CheckpointContainer indicates an expected call of CheckpointContainer. -func (mr *MockRuntimeMockRecorder) CheckpointContainer(ctx, options interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) CheckpointContainer(options interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntime)(nil).CheckpointContainer), ctx, options) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckpointContainer", reflect.TypeOf((*MockRuntime)(nil).CheckpointContainer), options) } // DeleteContainer mocks base method. -func (m *MockRuntime) DeleteContainer(ctx context.Context, containerID container.ContainerID) error { +func (m *MockRuntime) DeleteContainer(containerID container.ContainerID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteContainer", ctx, containerID) + ret := m.ctrl.Call(m, "DeleteContainer", containerID) ret0, _ := ret[0].(error) return ret0 } // DeleteContainer indicates an expected call of DeleteContainer. -func (mr *MockRuntimeMockRecorder) DeleteContainer(ctx, containerID interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) DeleteContainer(containerID interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockRuntime)(nil).DeleteContainer), ctx, containerID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteContainer", reflect.TypeOf((*MockRuntime)(nil).DeleteContainer), containerID) } // GarbageCollect mocks base method. 
-func (m *MockRuntime) GarbageCollect(ctx context.Context, gcPolicy container.GCPolicy, allSourcesReady, evictNonDeletedPods bool) error { +func (m *MockRuntime) GarbageCollect(gcPolicy container.GCPolicy, allSourcesReady, evictNonDeletedPods bool) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GarbageCollect", ctx, gcPolicy, allSourcesReady, evictNonDeletedPods) + ret := m.ctrl.Call(m, "GarbageCollect", gcPolicy, allSourcesReady, evictNonDeletedPods) ret0, _ := ret[0].(error) return ret0 } // GarbageCollect indicates an expected call of GarbageCollect. -func (mr *MockRuntimeMockRecorder) GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockRuntime)(nil).GarbageCollect), ctx, gcPolicy, allSourcesReady, evictNonDeletedPods) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GarbageCollect", reflect.TypeOf((*MockRuntime)(nil).GarbageCollect), gcPolicy, allSourcesReady, evictNonDeletedPods) } // GetContainerLogs mocks base method. @@ -183,150 +183,150 @@ func (mr *MockRuntimeMockRecorder) GetContainerLogs(ctx, pod, containerID, logOp } // GetImageRef mocks base method. -func (m *MockRuntime) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) { +func (m *MockRuntime) GetImageRef(image container.ImageSpec) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetImageRef", ctx, image) + ret := m.ctrl.Call(m, "GetImageRef", image) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // GetImageRef indicates an expected call of GetImageRef. -func (mr *MockRuntimeMockRecorder) GetImageRef(ctx, image interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) GetImageRef(image interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockRuntime)(nil).GetImageRef), ctx, image) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockRuntime)(nil).GetImageRef), image) } // GetPodStatus mocks base method. -func (m *MockRuntime) GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*container.PodStatus, error) { +func (m *MockRuntime) GetPodStatus(uid types.UID, name, namespace string) (*container.PodStatus, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPodStatus", ctx, uid, name, namespace) + ret := m.ctrl.Call(m, "GetPodStatus", uid, name, namespace) ret0, _ := ret[0].(*container.PodStatus) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPodStatus indicates an expected call of GetPodStatus. -func (mr *MockRuntimeMockRecorder) GetPodStatus(ctx, uid, name, namespace interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) GetPodStatus(uid, name, namespace interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodStatus", reflect.TypeOf((*MockRuntime)(nil).GetPodStatus), ctx, uid, name, namespace) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPodStatus", reflect.TypeOf((*MockRuntime)(nil).GetPodStatus), uid, name, namespace) } // GetPods mocks base method. 
-func (m *MockRuntime) GetPods(ctx context.Context, all bool) ([]*container.Pod, error) { +func (m *MockRuntime) GetPods(all bool) ([]*container.Pod, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPods", ctx, all) + ret := m.ctrl.Call(m, "GetPods", all) ret0, _ := ret[0].([]*container.Pod) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPods indicates an expected call of GetPods. -func (mr *MockRuntimeMockRecorder) GetPods(ctx, all interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) GetPods(all interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntime)(nil).GetPods), ctx, all) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPods", reflect.TypeOf((*MockRuntime)(nil).GetPods), all) } // ImageStats mocks base method. -func (m *MockRuntime) ImageStats(ctx context.Context) (*container.ImageStats, error) { +func (m *MockRuntime) ImageStats() (*container.ImageStats, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageStats", ctx) + ret := m.ctrl.Call(m, "ImageStats") ret0, _ := ret[0].(*container.ImageStats) ret1, _ := ret[1].(error) return ret0, ret1 } // ImageStats indicates an expected call of ImageStats. -func (mr *MockRuntimeMockRecorder) ImageStats(ctx interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) ImageStats() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockRuntime)(nil).ImageStats), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockRuntime)(nil).ImageStats)) } // KillPod mocks base method. -func (m *MockRuntime) KillPod(ctx context.Context, pod *v1.Pod, runningPod container.Pod, gracePeriodOverride *int64) error { +func (m *MockRuntime) KillPod(pod *v1.Pod, runningPod container.Pod, gracePeriodOverride *int64) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "KillPod", ctx, pod, runningPod, gracePeriodOverride) + ret := m.ctrl.Call(m, "KillPod", pod, runningPod, gracePeriodOverride) ret0, _ := ret[0].(error) return ret0 } // KillPod indicates an expected call of KillPod. -func (mr *MockRuntimeMockRecorder) KillPod(ctx, pod, runningPod, gracePeriodOverride interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) KillPod(pod, runningPod, gracePeriodOverride interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillPod", reflect.TypeOf((*MockRuntime)(nil).KillPod), ctx, pod, runningPod, gracePeriodOverride) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "KillPod", reflect.TypeOf((*MockRuntime)(nil).KillPod), pod, runningPod, gracePeriodOverride) } // ListImages mocks base method. -func (m *MockRuntime) ListImages(ctx context.Context) ([]container.Image, error) { +func (m *MockRuntime) ListImages() ([]container.Image, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListImages", ctx) + ret := m.ctrl.Call(m, "ListImages") ret0, _ := ret[0].([]container.Image) ret1, _ := ret[1].(error) return ret0, ret1 } // ListImages indicates an expected call of ListImages. 
-func (mr *MockRuntimeMockRecorder) ListImages(ctx interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) ListImages() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockRuntime)(nil).ListImages), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockRuntime)(nil).ListImages)) } // PullImage mocks base method. -func (m *MockRuntime) PullImage(ctx context.Context, image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) { +func (m *MockRuntime) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PullImage", ctx, image, pullSecrets, podSandboxConfig) + ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // PullImage indicates an expected call of PullImage. -func (mr *MockRuntimeMockRecorder) PullImage(ctx, image, pullSecrets, podSandboxConfig interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) PullImage(image, pullSecrets, podSandboxConfig interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockRuntime)(nil).PullImage), ctx, image, pullSecrets, podSandboxConfig) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockRuntime)(nil).PullImage), image, pullSecrets, podSandboxConfig) } // RemoveImage mocks base method. -func (m *MockRuntime) RemoveImage(ctx context.Context, image container.ImageSpec) error { +func (m *MockRuntime) RemoveImage(image container.ImageSpec) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveImage", ctx, image) + ret := m.ctrl.Call(m, "RemoveImage", image) ret0, _ := ret[0].(error) return ret0 } // RemoveImage indicates an expected call of RemoveImage. -func (mr *MockRuntimeMockRecorder) RemoveImage(ctx, image interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) RemoveImage(image interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockRuntime)(nil).RemoveImage), ctx, image) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockRuntime)(nil).RemoveImage), image) } // Status mocks base method. -func (m *MockRuntime) Status(ctx context.Context) (*container.RuntimeStatus, error) { +func (m *MockRuntime) Status() (*container.RuntimeStatus, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Status", ctx) + ret := m.ctrl.Call(m, "Status") ret0, _ := ret[0].(*container.RuntimeStatus) ret1, _ := ret[1].(error) return ret0, ret1 } // Status indicates an expected call of Status. -func (mr *MockRuntimeMockRecorder) Status(ctx interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) Status() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntime)(nil).Status), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockRuntime)(nil).Status)) } // SyncPod mocks base method. 
-func (m *MockRuntime) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *container.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult { +func (m *MockRuntime) SyncPod(pod *v1.Pod, podStatus *container.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) container.PodSyncResult { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SyncPod", ctx, pod, podStatus, pullSecrets, backOff) + ret := m.ctrl.Call(m, "SyncPod", pod, podStatus, pullSecrets, backOff) ret0, _ := ret[0].(container.PodSyncResult) return ret0 } // SyncPod indicates an expected call of SyncPod. -func (mr *MockRuntimeMockRecorder) SyncPod(ctx, pod, podStatus, pullSecrets, backOff interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) SyncPod(pod, podStatus, pullSecrets, backOff interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPod", reflect.TypeOf((*MockRuntime)(nil).SyncPod), ctx, pod, podStatus, pullSecrets, backOff) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncPod", reflect.TypeOf((*MockRuntime)(nil).SyncPod), pod, podStatus, pullSecrets, backOff) } // Type mocks base method. @@ -344,32 +344,32 @@ func (mr *MockRuntimeMockRecorder) Type() *gomock.Call { } // UpdatePodCIDR mocks base method. -func (m *MockRuntime) UpdatePodCIDR(ctx context.Context, podCIDR string) error { +func (m *MockRuntime) UpdatePodCIDR(podCIDR string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "UpdatePodCIDR", ctx, podCIDR) + ret := m.ctrl.Call(m, "UpdatePodCIDR", podCIDR) ret0, _ := ret[0].(error) return ret0 } // UpdatePodCIDR indicates an expected call of UpdatePodCIDR. -func (mr *MockRuntimeMockRecorder) UpdatePodCIDR(ctx, podCIDR interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) UpdatePodCIDR(podCIDR interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodCIDR", reflect.TypeOf((*MockRuntime)(nil).UpdatePodCIDR), ctx, podCIDR) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePodCIDR", reflect.TypeOf((*MockRuntime)(nil).UpdatePodCIDR), podCIDR) } // Version mocks base method. -func (m *MockRuntime) Version(ctx context.Context) (container.Version, error) { +func (m *MockRuntime) Version() (container.Version, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Version", ctx) + ret := m.ctrl.Call(m, "Version") ret0, _ := ret[0].(container.Version) ret1, _ := ret[1].(error) return ret0, ret1 } // Version indicates an expected call of Version. -func (mr *MockRuntimeMockRecorder) Version(ctx interface{}) *gomock.Call { +func (mr *MockRuntimeMockRecorder) Version() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntime)(nil).Version), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockRuntime)(nil).Version)) } // MockStreamingRuntime is a mock of StreamingRuntime interface. @@ -396,48 +396,48 @@ func (m *MockStreamingRuntime) EXPECT() *MockStreamingRuntimeMockRecorder { } // GetAttach mocks base method. 
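
The regenerated mocks above drop the leading ctx from both the mock methods and their recorder counterparts, so gomock expectations lose one argument. A hedged sketch of a test against the regenerated MockRuntime, assuming the mockgen-generated NewMockRuntime constructor in this testing package and the github.com/golang/mock/gomock module the repository used at the time:

package example // hypothetical test package, not part of this diff

import (
	"testing"

	"github.com/golang/mock/gomock"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)

func TestGetPodsExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	runtime := containertest.NewMockRuntime(ctrl)
	// The recorder no longer takes a ctx matcher; only the bool argument remains.
	runtime.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{{ID: "1111"}}, nil)

	pods, err := runtime.GetPods(true)
	if err != nil || len(pods) != 1 {
		t.Fatalf("unexpected GetPods result: %v, %v", pods, err)
	}
}
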
-func (m *MockStreamingRuntime) GetAttach(ctx context.Context, id container.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (m *MockStreamingRuntime) GetAttach(id container.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetAttach", ctx, id, stdin, stdout, stderr, tty) + ret := m.ctrl.Call(m, "GetAttach", id, stdin, stdout, stderr, tty) ret0, _ := ret[0].(*url.URL) ret1, _ := ret[1].(error) return ret0, ret1 } // GetAttach indicates an expected call of GetAttach. -func (mr *MockStreamingRuntimeMockRecorder) GetAttach(ctx, id, stdin, stdout, stderr, tty interface{}) *gomock.Call { +func (mr *MockStreamingRuntimeMockRecorder) GetAttach(id, stdin, stdout, stderr, tty interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttach", reflect.TypeOf((*MockStreamingRuntime)(nil).GetAttach), ctx, id, stdin, stdout, stderr, tty) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAttach", reflect.TypeOf((*MockStreamingRuntime)(nil).GetAttach), id, stdin, stdout, stderr, tty) } // GetExec mocks base method. -func (m *MockStreamingRuntime) GetExec(ctx context.Context, id container.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (m *MockStreamingRuntime) GetExec(id container.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetExec", ctx, id, cmd, stdin, stdout, stderr, tty) + ret := m.ctrl.Call(m, "GetExec", id, cmd, stdin, stdout, stderr, tty) ret0, _ := ret[0].(*url.URL) ret1, _ := ret[1].(error) return ret0, ret1 } // GetExec indicates an expected call of GetExec. -func (mr *MockStreamingRuntimeMockRecorder) GetExec(ctx, id, cmd, stdin, stdout, stderr, tty interface{}) *gomock.Call { +func (mr *MockStreamingRuntimeMockRecorder) GetExec(id, cmd, stdin, stdout, stderr, tty interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExec", reflect.TypeOf((*MockStreamingRuntime)(nil).GetExec), ctx, id, cmd, stdin, stdout, stderr, tty) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetExec", reflect.TypeOf((*MockStreamingRuntime)(nil).GetExec), id, cmd, stdin, stdout, stderr, tty) } // GetPortForward mocks base method. -func (m *MockStreamingRuntime) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { +func (m *MockStreamingRuntime) GetPortForward(podName, podNamespace string, podUID types.UID, ports []int32) (*url.URL, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPortForward", ctx, podName, podNamespace, podUID, ports) + ret := m.ctrl.Call(m, "GetPortForward", podName, podNamespace, podUID, ports) ret0, _ := ret[0].(*url.URL) ret1, _ := ret[1].(error) return ret0, ret1 } // GetPortForward indicates an expected call of GetPortForward. 
-func (mr *MockStreamingRuntimeMockRecorder) GetPortForward(ctx, podName, podNamespace, podUID, ports interface{}) *gomock.Call { +func (mr *MockStreamingRuntimeMockRecorder) GetPortForward(podName, podNamespace, podUID, ports interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPortForward", reflect.TypeOf((*MockStreamingRuntime)(nil).GetPortForward), ctx, podName, podNamespace, podUID, ports) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPortForward", reflect.TypeOf((*MockStreamingRuntime)(nil).GetPortForward), podName, podNamespace, podUID, ports) } // MockImageService is a mock of ImageService interface. @@ -464,77 +464,77 @@ func (m *MockImageService) EXPECT() *MockImageServiceMockRecorder { } // GetImageRef mocks base method. -func (m *MockImageService) GetImageRef(ctx context.Context, image container.ImageSpec) (string, error) { +func (m *MockImageService) GetImageRef(image container.ImageSpec) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetImageRef", ctx, image) + ret := m.ctrl.Call(m, "GetImageRef", image) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // GetImageRef indicates an expected call of GetImageRef. -func (mr *MockImageServiceMockRecorder) GetImageRef(ctx, image interface{}) *gomock.Call { +func (mr *MockImageServiceMockRecorder) GetImageRef(image interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockImageService)(nil).GetImageRef), ctx, image) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetImageRef", reflect.TypeOf((*MockImageService)(nil).GetImageRef), image) } // ImageStats mocks base method. -func (m *MockImageService) ImageStats(ctx context.Context) (*container.ImageStats, error) { +func (m *MockImageService) ImageStats() (*container.ImageStats, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageStats", ctx) + ret := m.ctrl.Call(m, "ImageStats") ret0, _ := ret[0].(*container.ImageStats) ret1, _ := ret[1].(error) return ret0, ret1 } // ImageStats indicates an expected call of ImageStats. -func (mr *MockImageServiceMockRecorder) ImageStats(ctx interface{}) *gomock.Call { +func (mr *MockImageServiceMockRecorder) ImageStats() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockImageService)(nil).ImageStats), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageStats", reflect.TypeOf((*MockImageService)(nil).ImageStats)) } // ListImages mocks base method. -func (m *MockImageService) ListImages(ctx context.Context) ([]container.Image, error) { +func (m *MockImageService) ListImages() ([]container.Image, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListImages", ctx) + ret := m.ctrl.Call(m, "ListImages") ret0, _ := ret[0].([]container.Image) ret1, _ := ret[1].(error) return ret0, ret1 } // ListImages indicates an expected call of ListImages. -func (mr *MockImageServiceMockRecorder) ListImages(ctx interface{}) *gomock.Call { +func (mr *MockImageServiceMockRecorder) ListImages() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageService)(nil).ListImages), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListImages", reflect.TypeOf((*MockImageService)(nil).ListImages)) } // PullImage mocks base method. 
-func (m *MockImageService) PullImage(ctx context.Context, image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) { +func (m *MockImageService) PullImage(image container.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *v10.PodSandboxConfig) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PullImage", ctx, image, pullSecrets, podSandboxConfig) + ret := m.ctrl.Call(m, "PullImage", image, pullSecrets, podSandboxConfig) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // PullImage indicates an expected call of PullImage. -func (mr *MockImageServiceMockRecorder) PullImage(ctx, image, pullSecrets, podSandboxConfig interface{}) *gomock.Call { +func (mr *MockImageServiceMockRecorder) PullImage(image, pullSecrets, podSandboxConfig interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageService)(nil).PullImage), ctx, image, pullSecrets, podSandboxConfig) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullImage", reflect.TypeOf((*MockImageService)(nil).PullImage), image, pullSecrets, podSandboxConfig) } // RemoveImage mocks base method. -func (m *MockImageService) RemoveImage(ctx context.Context, image container.ImageSpec) error { +func (m *MockImageService) RemoveImage(image container.ImageSpec) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveImage", ctx, image) + ret := m.ctrl.Call(m, "RemoveImage", image) ret0, _ := ret[0].(error) return ret0 } // RemoveImage indicates an expected call of RemoveImage. -func (mr *MockImageServiceMockRecorder) RemoveImage(ctx, image interface{}) *gomock.Call { +func (mr *MockImageServiceMockRecorder) RemoveImage(image interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageService)(nil).RemoveImage), ctx, image) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveImage", reflect.TypeOf((*MockImageService)(nil).RemoveImage), image) } // MockAttacher is a mock of Attacher interface. @@ -561,17 +561,17 @@ func (m *MockAttacher) EXPECT() *MockAttacherMockRecorder { } // AttachContainer mocks base method. -func (m *MockAttacher) AttachContainer(ctx context.Context, id container.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { +func (m *MockAttacher) AttachContainer(id container.ContainerID, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AttachContainer", ctx, id, stdin, stdout, stderr, tty, resize) + ret := m.ctrl.Call(m, "AttachContainer", id, stdin, stdout, stderr, tty, resize) ret0, _ := ret[0].(error) return ret0 } // AttachContainer indicates an expected call of AttachContainer. 
-func (mr *MockAttacherMockRecorder) AttachContainer(ctx, id, stdin, stdout, stderr, tty, resize interface{}) *gomock.Call { +func (mr *MockAttacherMockRecorder) AttachContainer(id, stdin, stdout, stderr, tty, resize interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachContainer", reflect.TypeOf((*MockAttacher)(nil).AttachContainer), ctx, id, stdin, stdout, stderr, tty, resize) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AttachContainer", reflect.TypeOf((*MockAttacher)(nil).AttachContainer), id, stdin, stdout, stderr, tty, resize) } // MockCommandRunner is a mock of CommandRunner interface. @@ -598,16 +598,16 @@ func (m *MockCommandRunner) EXPECT() *MockCommandRunnerMockRecorder { } // RunInContainer mocks base method. -func (m *MockCommandRunner) RunInContainer(ctx context.Context, id container.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { +func (m *MockCommandRunner) RunInContainer(id container.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RunInContainer", ctx, id, cmd, timeout) + ret := m.ctrl.Call(m, "RunInContainer", id, cmd, timeout) ret0, _ := ret[0].([]byte) ret1, _ := ret[1].(error) return ret0, ret1 } // RunInContainer indicates an expected call of RunInContainer. -func (mr *MockCommandRunnerMockRecorder) RunInContainer(ctx, id, cmd, timeout interface{}) *gomock.Call { +func (mr *MockCommandRunnerMockRecorder) RunInContainer(id, cmd, timeout interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInContainer", reflect.TypeOf((*MockCommandRunner)(nil).RunInContainer), ctx, id, cmd, timeout) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RunInContainer", reflect.TypeOf((*MockCommandRunner)(nil).RunInContainer), id, cmd, timeout) } diff --git a/pkg/kubelet/cri/remote/fake/fake_image_service.go b/pkg/kubelet/cri/remote/fake/fake_image_service.go index 0e10e5ae8aa..03b35c5c8fd 100644 --- a/pkg/kubelet/cri/remote/fake/fake_image_service.go +++ b/pkg/kubelet/cri/remote/fake/fake_image_service.go @@ -24,7 +24,7 @@ import ( // ListImages lists existing images. func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesRequest) (*kubeapi.ListImagesResponse, error) { - images, err := f.ImageService.ListImages(ctx, req.Filter) + images, err := f.ImageService.ListImages(req.Filter) if err != nil { return nil, err } @@ -38,7 +38,7 @@ func (f *RemoteRuntime) ListImages(ctx context.Context, req *kubeapi.ListImagesR // present, returns a response with ImageStatusResponse.Image set to // nil. func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatusRequest) (*kubeapi.ImageStatusResponse, error) { - resp, err := f.ImageService.ImageStatus(ctx, req.Image, false) + resp, err := f.ImageService.ImageStatus(req.Image, false) if err != nil { return nil, err } @@ -48,7 +48,7 @@ func (f *RemoteRuntime) ImageStatus(ctx context.Context, req *kubeapi.ImageStatu // PullImage pulls an image with authentication config. 
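The regenerated gomock mocks earlier in this diff drop the leading ctx from both the mock methods and their recorders, so expectations in kubelet tests lose one argument as well. A minimal sketch of what a call-site expectation might look like against the regenerated MockImageService; the test name, package aliases, and import paths are assumptions, only the mock and recorder signatures come from this diff.

package kubelet_test // hypothetical test package

import (
	"testing"

	"github.com/golang/mock/gomock"

	// Assumed import paths for the container types and the regenerated mocks.
	"k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)

func TestListImagesExpectation(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	imageService := containertest.NewMockImageService(ctrl)

	// Before this change the expectation needed a placeholder for ctx:
	//   imageService.EXPECT().ListImages(gomock.Any()).Return(nil, nil)
	// After regeneration the recorder takes no arguments at all.
	imageService.EXPECT().ListImages().Return([]container.Image{}, nil)

	if _, err := imageService.ListImages(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}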
func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageRequest) (*kubeapi.PullImageResponse, error) { - image, err := f.ImageService.PullImage(ctx, req.Image, req.Auth, req.SandboxConfig) + image, err := f.ImageService.PullImage(req.Image, req.Auth, req.SandboxConfig) if err != nil { return nil, err } @@ -62,7 +62,7 @@ func (f *RemoteRuntime) PullImage(ctx context.Context, req *kubeapi.PullImageReq // This call is idempotent, and must not return an error if the image has // already been removed. func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImageRequest) (*kubeapi.RemoveImageResponse, error) { - err := f.ImageService.RemoveImage(ctx, req.Image) + err := f.ImageService.RemoveImage(req.Image) if err != nil { return nil, err } @@ -72,7 +72,7 @@ func (f *RemoteRuntime) RemoveImage(ctx context.Context, req *kubeapi.RemoveImag // ImageFsInfo returns information of the filesystem that is used to store images. func (f *RemoteRuntime) ImageFsInfo(ctx context.Context, req *kubeapi.ImageFsInfoRequest) (*kubeapi.ImageFsInfoResponse, error) { - fsUsage, err := f.ImageService.ImageFsInfo(ctx) + fsUsage, err := f.ImageService.ImageFsInfo() if err != nil { return nil, err } diff --git a/pkg/kubelet/cri/remote/fake/fake_runtime.go b/pkg/kubelet/cri/remote/fake/fake_runtime.go index 7b6c2421248..be52c91a3ac 100644 --- a/pkg/kubelet/cri/remote/fake/fake_runtime.go +++ b/pkg/kubelet/cri/remote/fake/fake_runtime.go @@ -80,13 +80,13 @@ func (f *RemoteRuntime) Stop() { // Version returns the runtime name, runtime version, and runtime API version. func (f *RemoteRuntime) Version(ctx context.Context, req *kubeapi.VersionRequest) (*kubeapi.VersionResponse, error) { - return f.RuntimeService.Version(ctx, req.Version) + return f.RuntimeService.Version(req.Version) } // RunPodSandbox creates and starts a pod-level sandbox. Runtimes must ensure // the sandbox is in the ready state on success. func (f *RemoteRuntime) RunPodSandbox(ctx context.Context, req *kubeapi.RunPodSandboxRequest) (*kubeapi.RunPodSandboxResponse, error) { - sandboxID, err := f.RuntimeService.RunPodSandbox(ctx, req.Config, req.RuntimeHandler) + sandboxID, err := f.RuntimeService.RunPodSandbox(req.Config, req.RuntimeHandler) if err != nil { return nil, err } @@ -99,7 +99,7 @@ func (f *RemoteRuntime) RunPodSandbox(ctx context.Context, req *kubeapi.RunPodSa // If there are any running containers in the sandbox, they must be forcibly // terminated. func (f *RemoteRuntime) StopPodSandbox(ctx context.Context, req *kubeapi.StopPodSandboxRequest) (*kubeapi.StopPodSandboxResponse, error) { - err := f.RuntimeService.StopPodSandbox(ctx, req.PodSandboxId) + err := f.RuntimeService.StopPodSandbox(req.PodSandboxId) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (f *RemoteRuntime) StopPodSandbox(ctx context.Context, req *kubeapi.StopPod // This call is idempotent, and must not return an error if the sandbox has // already been removed. func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.RemovePodSandboxRequest) (*kubeapi.RemovePodSandboxResponse, error) { - err := f.RuntimeService.StopPodSandbox(ctx, req.PodSandboxId) + err := f.RuntimeService.StopPodSandbox(req.PodSandboxId) if err != nil { return nil, err } @@ -123,7 +123,7 @@ func (f *RemoteRuntime) RemovePodSandbox(ctx context.Context, req *kubeapi.Remov // PodSandboxStatus returns the status of the PodSandbox. If the PodSandbox is not // present, returns an error. 
func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSandboxStatusRequest) (*kubeapi.PodSandboxStatusResponse, error) { - resp, err := f.RuntimeService.PodSandboxStatus(ctx, req.PodSandboxId, false) + resp, err := f.RuntimeService.PodSandboxStatus(req.PodSandboxId, false) if err != nil { return nil, err } @@ -133,7 +133,7 @@ func (f *RemoteRuntime) PodSandboxStatus(ctx context.Context, req *kubeapi.PodSa // ListPodSandbox returns a list of PodSandboxes. func (f *RemoteRuntime) ListPodSandbox(ctx context.Context, req *kubeapi.ListPodSandboxRequest) (*kubeapi.ListPodSandboxResponse, error) { - items, err := f.RuntimeService.ListPodSandbox(ctx, req.Filter) + items, err := f.RuntimeService.ListPodSandbox(req.Filter) if err != nil { return nil, err } @@ -143,7 +143,7 @@ func (f *RemoteRuntime) ListPodSandbox(ctx context.Context, req *kubeapi.ListPod // CreateContainer creates a new container in specified PodSandbox func (f *RemoteRuntime) CreateContainer(ctx context.Context, req *kubeapi.CreateContainerRequest) (*kubeapi.CreateContainerResponse, error) { - containerID, err := f.RuntimeService.CreateContainer(ctx, req.PodSandboxId, req.Config, req.SandboxConfig) + containerID, err := f.RuntimeService.CreateContainer(req.PodSandboxId, req.Config, req.SandboxConfig) if err != nil { return nil, err } @@ -153,7 +153,7 @@ func (f *RemoteRuntime) CreateContainer(ctx context.Context, req *kubeapi.Create // StartContainer starts the container. func (f *RemoteRuntime) StartContainer(ctx context.Context, req *kubeapi.StartContainerRequest) (*kubeapi.StartContainerResponse, error) { - err := f.RuntimeService.StartContainer(ctx, req.ContainerId) + err := f.RuntimeService.StartContainer(req.ContainerId) if err != nil { return nil, err } @@ -165,7 +165,7 @@ func (f *RemoteRuntime) StartContainer(ctx context.Context, req *kubeapi.StartCo // This call is idempotent, and must not return an error if the container has // already been stopped. func (f *RemoteRuntime) StopContainer(ctx context.Context, req *kubeapi.StopContainerRequest) (*kubeapi.StopContainerResponse, error) { - err := f.RuntimeService.StopContainer(ctx, req.ContainerId, req.Timeout) + err := f.RuntimeService.StopContainer(req.ContainerId, req.Timeout) if err != nil { return nil, err } @@ -178,7 +178,7 @@ func (f *RemoteRuntime) StopContainer(ctx context.Context, req *kubeapi.StopCont // This call is idempotent, and must not return an error if the container has // already been removed. func (f *RemoteRuntime) RemoveContainer(ctx context.Context, req *kubeapi.RemoveContainerRequest) (*kubeapi.RemoveContainerResponse, error) { - err := f.RuntimeService.RemoveContainer(ctx, req.ContainerId) + err := f.RuntimeService.RemoveContainer(req.ContainerId) if err != nil { return nil, err } @@ -188,7 +188,7 @@ func (f *RemoteRuntime) RemoveContainer(ctx context.Context, req *kubeapi.Remove // ListContainers lists all containers by filters. func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListContainersRequest) (*kubeapi.ListContainersResponse, error) { - items, err := f.RuntimeService.ListContainers(ctx, req.Filter) + items, err := f.RuntimeService.ListContainers(req.Filter) if err != nil { return nil, err } @@ -199,7 +199,7 @@ func (f *RemoteRuntime) ListContainers(ctx context.Context, req *kubeapi.ListCon // ContainerStatus returns status of the container. If the container is not // present, returns an error. 
func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.ContainerStatusRequest) (*kubeapi.ContainerStatusResponse, error) { - resp, err := f.RuntimeService.ContainerStatus(ctx, req.ContainerId, false) + resp, err := f.RuntimeService.ContainerStatus(req.ContainerId, false) if err != nil { return nil, err } @@ -210,7 +210,7 @@ func (f *RemoteRuntime) ContainerStatus(ctx context.Context, req *kubeapi.Contai // ExecSync runs a command in a container synchronously. func (f *RemoteRuntime) ExecSync(ctx context.Context, req *kubeapi.ExecSyncRequest) (*kubeapi.ExecSyncResponse, error) { var exitCode int32 - stdout, stderr, err := f.RuntimeService.ExecSync(ctx, req.ContainerId, req.Cmd, time.Duration(req.Timeout)*time.Second) + stdout, stderr, err := f.RuntimeService.ExecSync(req.ContainerId, req.Cmd, time.Duration(req.Timeout)*time.Second) if err != nil { exitError, ok := err.(utilexec.ExitError) if !ok { @@ -228,23 +228,23 @@ func (f *RemoteRuntime) ExecSync(ctx context.Context, req *kubeapi.ExecSyncReque // Exec prepares a streaming endpoint to execute a command in the container. func (f *RemoteRuntime) Exec(ctx context.Context, req *kubeapi.ExecRequest) (*kubeapi.ExecResponse, error) { - return f.RuntimeService.Exec(ctx, req) + return f.RuntimeService.Exec(req) } // Attach prepares a streaming endpoint to attach to a running container. func (f *RemoteRuntime) Attach(ctx context.Context, req *kubeapi.AttachRequest) (*kubeapi.AttachResponse, error) { - return f.RuntimeService.Attach(ctx, req) + return f.RuntimeService.Attach(req) } // PortForward prepares a streaming endpoint to forward ports from a PodSandbox. func (f *RemoteRuntime) PortForward(ctx context.Context, req *kubeapi.PortForwardRequest) (*kubeapi.PortForwardResponse, error) { - return f.RuntimeService.PortForward(ctx, req) + return f.RuntimeService.PortForward(req) } // ContainerStats returns stats of the container. If the container does not // exist, the call returns an error. func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.ContainerStatsRequest) (*kubeapi.ContainerStatsResponse, error) { - stats, err := f.RuntimeService.ContainerStats(ctx, req.ContainerId) + stats, err := f.RuntimeService.ContainerStats(req.ContainerId) if err != nil { return nil, err } @@ -254,7 +254,7 @@ func (f *RemoteRuntime) ContainerStats(ctx context.Context, req *kubeapi.Contain // ListContainerStats returns stats of all running containers. func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.ListContainerStatsRequest) (*kubeapi.ListContainerStatsResponse, error) { - stats, err := f.RuntimeService.ListContainerStats(ctx, req.Filter) + stats, err := f.RuntimeService.ListContainerStats(req.Filter) if err != nil { return nil, err } @@ -265,7 +265,7 @@ func (f *RemoteRuntime) ListContainerStats(ctx context.Context, req *kubeapi.Lis // PodSandboxStats returns stats of the pod. If the pod does not // exist, the call returns an error. func (f *RemoteRuntime) PodSandboxStats(ctx context.Context, req *kubeapi.PodSandboxStatsRequest) (*kubeapi.PodSandboxStatsResponse, error) { - stats, err := f.RuntimeService.PodSandboxStats(ctx, req.PodSandboxId) + stats, err := f.RuntimeService.PodSandboxStats(req.PodSandboxId) if err != nil { return nil, err } @@ -275,7 +275,7 @@ func (f *RemoteRuntime) PodSandboxStats(ctx context.Context, req *kubeapi.PodSan // ListPodSandboxStats returns stats of all running pods. 
func (f *RemoteRuntime) ListPodSandboxStats(ctx context.Context, req *kubeapi.ListPodSandboxStatsRequest) (*kubeapi.ListPodSandboxStatsResponse, error) { - stats, err := f.RuntimeService.ListPodSandboxStats(ctx, req.Filter) + stats, err := f.RuntimeService.ListPodSandboxStats(req.Filter) if err != nil { return nil, err } @@ -285,7 +285,7 @@ func (f *RemoteRuntime) ListPodSandboxStats(ctx context.Context, req *kubeapi.Li // UpdateRuntimeConfig updates the runtime configuration based on the given request. func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.UpdateRuntimeConfigRequest) (*kubeapi.UpdateRuntimeConfigResponse, error) { - err := f.RuntimeService.UpdateRuntimeConfig(ctx, req.RuntimeConfig) + err := f.RuntimeService.UpdateRuntimeConfig(req.RuntimeConfig) if err != nil { return nil, err } @@ -295,7 +295,7 @@ func (f *RemoteRuntime) UpdateRuntimeConfig(ctx context.Context, req *kubeapi.Up // Status returns the status of the runtime. func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest) (*kubeapi.StatusResponse, error) { - resp, err := f.RuntimeService.Status(ctx, false) + resp, err := f.RuntimeService.Status(false) if err != nil { return nil, err } @@ -305,7 +305,7 @@ func (f *RemoteRuntime) Status(ctx context.Context, req *kubeapi.StatusRequest) // UpdateContainerResources updates ContainerConfig of the container. func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubeapi.UpdateContainerResourcesRequest) (*kubeapi.UpdateContainerResourcesResponse, error) { - err := f.RuntimeService.UpdateContainerResources(ctx, req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux}) + err := f.RuntimeService.UpdateContainerResources(req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux}) if err != nil { return nil, err } @@ -315,7 +315,7 @@ func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubea // ReopenContainerLog reopens the container log file. func (f *RemoteRuntime) ReopenContainerLog(ctx context.Context, req *kubeapi.ReopenContainerLogRequest) (*kubeapi.ReopenContainerLogResponse, error) { - err := f.RuntimeService.ReopenContainerLog(ctx, req.ContainerId) + err := f.RuntimeService.ReopenContainerLog(req.ContainerId) if err != nil { return nil, err } @@ -325,7 +325,7 @@ func (f *RemoteRuntime) ReopenContainerLog(ctx context.Context, req *kubeapi.Reo // CheckpointContainer checkpoints the given container. 
func (f *RemoteRuntime) CheckpointContainer(ctx context.Context, req *kubeapi.CheckpointContainerRequest) (*kubeapi.CheckpointContainerResponse, error) { - err := f.RuntimeService.CheckpointContainer(ctx, &kubeapi.CheckpointContainerRequest{}) + err := f.RuntimeService.CheckpointContainer(&kubeapi.CheckpointContainerRequest{}) if err != nil { return nil, err } diff --git a/pkg/kubelet/cri/remote/remote_image.go b/pkg/kubelet/cri/remote/remote_image.go index 1060e6b12aa..733a1492118 100644 --- a/pkg/kubelet/cri/remote/remote_image.go +++ b/pkg/kubelet/cri/remote/remote_image.go @@ -82,7 +82,7 @@ func NewRemoteImageService(endpoint string, connectionTimeout time.Duration, tp service := &remoteImageService{timeout: connectionTimeout} - if err := service.determineAPIVersion(ctx, conn, endpoint); err != nil { + if err := service.determineAPIVersion(conn, endpoint); err != nil { return nil, err } @@ -103,8 +103,8 @@ func (r *remoteImageService) useV1API() bool { // being upgraded, then the container runtime must also support the initially // selected version or the redial is expected to fail, which requires a restart // of kubelet. -func (r *remoteImageService) determineAPIVersion(ctx context.Context, conn *grpc.ClientConn, endpoint string) error { - ctx, cancel := context.WithTimeout(ctx, r.timeout) +func (r *remoteImageService) determineAPIVersion(conn *grpc.ClientConn, endpoint string) error { + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() klog.V(4).InfoS("Finding the CRI API image version") @@ -125,8 +125,8 @@ func (r *remoteImageService) determineAPIVersion(ctx context.Context, conn *grpc } // ListImages lists available images. -func (r *remoteImageService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { - ctx, cancel := context.WithTimeout(ctx, r.timeout) +func (r *remoteImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -160,8 +160,8 @@ func (r *remoteImageService) listImagesV1(ctx context.Context, filter *runtimeap } // ImageStatus returns the status of the image. -func (r *remoteImageService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) { - ctx, cancel := context.WithTimeout(ctx, r.timeout) +func (r *remoteImageService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) { + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() // TODO: for refactoring common code blocks between the cri versions into @@ -220,8 +220,8 @@ func (r *remoteImageService) imageStatusV1(ctx context.Context, image *runtimeap } // PullImage pulls an image with authentication config. -func (r *remoteImageService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { - ctx, cancel := context.WithCancel(ctx) +func (r *remoteImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { + ctx, cancel := getContextWithCancel() defer cancel() if r.useV1API() { @@ -272,8 +272,8 @@ func (r *remoteImageService) pullImageV1(ctx context.Context, image *runtimeapi. } // RemoveImage removes the image. 
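Throughout fake_runtime.go above, the gRPC-facing methods keep the ctx that the generated CRI server interface requires, while the in-process RuntimeService they delegate to no longer takes one, so the context stops at the transport boundary. A stripped-down sketch of that shape with entirely hypothetical names:

package fakecri // hypothetical sketch

import "context"

// backend stands in for the context-free, in-process service.
type backend interface {
	StartContainer(containerID string) error
}

type startRequest struct{ ContainerID string }
type startResponse struct{}

// server stands in for the gRPC-facing fake: it accepts the caller's ctx to
// satisfy the transport-level signature but does not pass it further down.
type server struct {
	backend backend
}

func (s *server) StartContainer(ctx context.Context, req *startRequest) (*startResponse, error) {
	// Nothing below carries the caller's deadline or cancellation.
	if err := s.backend.StartContainer(req.ContainerID); err != nil {
		return nil, err
	}
	return &startResponse{}, nil
}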
-func (r *remoteImageService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) (err error) { - ctx, cancel := context.WithTimeout(ctx, r.timeout) +func (r *remoteImageService) RemoveImage(image *runtimeapi.ImageSpec) (err error) { + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -294,10 +294,10 @@ func (r *remoteImageService) RemoveImage(ctx context.Context, image *runtimeapi. } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *remoteImageService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (r *remoteImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { // Do not set timeout, because `ImageFsInfo` takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := getContextWithCancel() defer cancel() if r.useV1API() { diff --git a/pkg/kubelet/cri/remote/remote_image_test.go b/pkg/kubelet/cri/remote/remote_image_test.go index f253e4f20f8..1944687e23e 100644 --- a/pkg/kubelet/cri/remote/remote_image_test.go +++ b/pkg/kubelet/cri/remote/remote_image_test.go @@ -66,7 +66,7 @@ func TestImageServiceSpansWithTP(t *testing.T) { ) ctx := context.Background() imgSvc := createRemoteImageServiceWithTracerProvider(endpoint, tp, t) - imgRef, err := imgSvc.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) + imgRef, err := imgSvc.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) assert.NoError(t, err) assert.Equal(t, "busybox", imgRef) require.NoError(t, err) @@ -93,7 +93,7 @@ func TestImageServiceSpansWithoutTP(t *testing.T) { ) ctx := context.Background() imgSvc := createRemoteImageServiceWithoutTracerProvider(endpoint, t) - imgRef, err := imgSvc.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) + imgRef, err := imgSvc.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) assert.NoError(t, err) assert.Equal(t, "busybox", imgRef) require.NoError(t, err) diff --git a/pkg/kubelet/cri/remote/remote_runtime.go b/pkg/kubelet/cri/remote/remote_runtime.go index 3bc7430be94..57b46ae09e2 100644 --- a/pkg/kubelet/cri/remote/remote_runtime.go +++ b/pkg/kubelet/cri/remote/remote_runtime.go @@ -108,7 +108,7 @@ func NewRemoteRuntimeService(endpoint string, connectionTimeout time.Duration, t logReduction: logreduction.NewLogReduction(identicalErrorDelay), } - if err := service.determineAPIVersion(ctx, conn, endpoint); err != nil { + if err := service.determineAPIVersion(conn, endpoint); err != nil { return nil, err } @@ -128,8 +128,8 @@ func (r *remoteRuntimeService) useV1API() bool { // being upgraded, then the container runtime must also support the initially // selected version or the redial is expected to fail, which requires a restart // of kubelet. -func (r *remoteRuntimeService) determineAPIVersion(ctx context.Context, conn *grpc.ClientConn, endpoint string) error { - ctx, cancel := context.WithTimeout(ctx, r.timeout) +func (r *remoteRuntimeService) determineAPIVersion(conn *grpc.ClientConn, endpoint string) error { + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() klog.V(4).InfoS("Finding the CRI API runtime version") @@ -150,10 +150,10 @@ func (r *remoteRuntimeService) determineAPIVersion(ctx context.Context, conn *gr } // Version returns the runtime name, runtime version and runtime API version. 
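determineAPIVersion, changed above for both remote_image.go and remote_runtime.go, now builds its short-lived probe context from the package helpers instead of a caller-supplied ctx. A loose sketch of the probe-and-fallback control flow with a hypothetical client interface; the real method talks to the generated CRI clients, so treat this purely as an illustration of the shape, not as the kubelet's code:

package remote // hypothetical sketch

import (
	"context"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// v1Prober stands in for the CRI v1 client's Version call.
type v1Prober interface {
	Version(ctx context.Context, apiVersion string) error
}

// probeAPIVersion probes the v1 API under a freshly created timeout context;
// an Unimplemented response means the runtime only speaks the older API, and
// any other error is treated as fatal.
func probeAPIVersion(probe v1Prober, timeout time.Duration) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	switch err := probe.Version(ctx, ""); {
	case err == nil:
		return true, nil
	case status.Code(err) == codes.Unimplemented:
		return false, nil // fall back to the v1alpha2 client
	default:
		return false, err
	}
}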
-func (r *remoteRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) { +func (r *remoteRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Version", "apiVersion", apiVersion, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -201,14 +201,14 @@ func (r *remoteRuntimeService) versionV1alpha2(ctx context.Context, apiVersion s // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // the sandbox is in ready state. -func (r *remoteRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (r *remoteRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { // Use 2 times longer timeout for sandbox operation (4 mins by default) // TODO: Make the pod sandbox timeout configurable. timeout := r.timeout * 2 klog.V(10).InfoS("[RemoteRuntimeService] RunPodSandbox", "config", config, "runtimeHandler", runtimeHandler, "timeout", timeout) - ctx, cancel := context.WithTimeout(ctx, timeout) + ctx, cancel := getContextWithTimeout(timeout) defer cancel() var podSandboxID string @@ -250,10 +250,10 @@ func (r *remoteRuntimeService) RunPodSandbox(ctx context.Context, config *runtim // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be forced to termination. -func (r *remoteRuntimeService) StopPodSandbox(ctx context.Context, podSandBoxID string) (err error) { +func (r *remoteRuntimeService) StopPodSandbox(podSandBoxID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] StopPodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -277,9 +277,9 @@ func (r *remoteRuntimeService) StopPodSandbox(ctx context.Context, podSandBoxID // RemovePodSandbox removes the sandbox. If there are any containers in the // sandbox, they should be forcibly removed. -func (r *remoteRuntimeService) RemovePodSandbox(ctx context.Context, podSandBoxID string) (err error) { +func (r *remoteRuntimeService) RemovePodSandbox(podSandBoxID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] RemovePodSandbox", "podSandboxID", podSandBoxID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -302,9 +302,9 @@ func (r *remoteRuntimeService) RemovePodSandbox(ctx context.Context, podSandBoxI } // PodSandboxStatus returns the status of the PodSandbox. -func (r *remoteRuntimeService) PodSandboxStatus(ctx context.Context, podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { +func (r *remoteRuntimeService) PodSandboxStatus(podSandBoxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStatus", "podSandboxID", podSandBoxID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -357,9 +357,9 @@ func (r *remoteRuntimeService) podSandboxStatusV1(ctx context.Context, podSandBo } // ListPodSandbox returns a list of PodSandboxes. 
-func (r *remoteRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (r *remoteRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandbox", "filter", filter, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -398,9 +398,9 @@ func (r *remoteRuntimeService) listPodSandboxV1(ctx context.Context, filter *run } // CreateContainer creates a new container in the specified PodSandbox. -func (r *remoteRuntimeService) CreateContainer(ctx context.Context, podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *remoteRuntimeService) CreateContainer(podSandBoxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { klog.V(10).InfoS("[RemoteRuntimeService] CreateContainer", "podSandboxID", podSandBoxID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -455,9 +455,9 @@ func (r *remoteRuntimeService) createContainerV1(ctx context.Context, podSandBox } // StartContainer starts the container. -func (r *remoteRuntimeService) StartContainer(ctx context.Context, containerID string) (err error) { +func (r *remoteRuntimeService) StartContainer(containerID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] StartContainer", "containerID", containerID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -480,12 +480,12 @@ func (r *remoteRuntimeService) StartContainer(ctx context.Context, containerID s } // StopContainer stops a running container with a grace period (i.e., timeout). -func (r *remoteRuntimeService) StopContainer(ctx context.Context, containerID string, timeout int64) (err error) { +func (r *remoteRuntimeService) StopContainer(containerID string, timeout int64) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] StopContainer", "containerID", containerID, "timeout", timeout) // Use timeout + default timeout (2 minutes) as timeout to leave extra time // for SIGKILL container and request latency. t := r.timeout + time.Duration(timeout)*time.Second - ctx, cancel := context.WithTimeout(ctx, t) + ctx, cancel := getContextWithTimeout(t) defer cancel() r.logReduction.ClearID(containerID) @@ -512,9 +512,9 @@ func (r *remoteRuntimeService) StopContainer(ctx context.Context, containerID st // RemoveContainer removes the container. If the container is running, the container // should be forced to removal. -func (r *remoteRuntimeService) RemoveContainer(ctx context.Context, containerID string) (err error) { +func (r *remoteRuntimeService) RemoveContainer(containerID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] RemoveContainer", "containerID", containerID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() r.logReduction.ClearID(containerID) @@ -537,9 +537,9 @@ func (r *remoteRuntimeService) RemoveContainer(ctx context.Context, containerID } // ListContainers lists containers by filters. 
-func (r *remoteRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (r *remoteRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListContainers", "filter", filter, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -576,9 +576,9 @@ func (r *remoteRuntimeService) listContainersV1(ctx context.Context, filter *run } // ContainerStatus returns the container status. -func (r *remoteRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { +func (r *remoteRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] ContainerStatus", "containerID", containerID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -641,9 +641,9 @@ func (r *remoteRuntimeService) containerStatusV1(ctx context.Context, containerI } // UpdateContainerResources updates a containers resource config -func (r *remoteRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) (err error) { +func (r *remoteRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] UpdateContainerResources", "containerID", containerID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -670,16 +670,17 @@ func (r *remoteRuntimeService) UpdateContainerResources(ctx context.Context, con // ExecSync executes a command in the container, and returns the stdout output. // If command exits with a non-zero exit code, an error is returned. -func (r *remoteRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { +func (r *remoteRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { klog.V(10).InfoS("[RemoteRuntimeService] ExecSync", "containerID", containerID, "timeout", timeout) // Do not set timeout when timeout is 0. + var ctx context.Context var cancel context.CancelFunc if timeout != 0 { // Use timeout + default timeout (2 minutes) as timeout to leave some time for // the runtime to do cleanup. - ctx, cancel = context.WithTimeout(ctx, r.timeout+timeout) + ctx, cancel = getContextWithTimeout(r.timeout + timeout) } else { - ctx, cancel = context.WithCancel(ctx) + ctx, cancel = getContextWithCancel() } defer cancel() @@ -753,9 +754,9 @@ func (r *remoteRuntimeService) execSyncV1(ctx context.Context, containerID strin } // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. 
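ExecSync above is the one call where the shape of the context depends on an argument: a zero timeout means no deadline at all, while a non-zero timeout gets the service's base timeout added on top as slack for runtime cleanup. A small stdlib-only sketch of that selection pulled out into a standalone helper; the helper name is made up for illustration, and callers would still defer cancel() exactly as ExecSync does:

package remote // hypothetical sketch

import (
	"context"
	"time"
)

// contextForExecSync returns a cancel-only context when the caller passed
// timeout == 0, and otherwise a deadline of the per-call timeout plus the
// service's base timeout, leaving the runtime time to clean up.
func contextForExecSync(baseTimeout, timeout time.Duration) (context.Context, context.CancelFunc) {
	if timeout == 0 {
		return context.WithCancel(context.Background())
	}
	return context.WithTimeout(context.Background(), baseTimeout+timeout)
}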
-func (r *remoteRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (r *remoteRuntimeService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Exec", "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -802,9 +803,9 @@ func (r *remoteRuntimeService) execV1(ctx context.Context, req *runtimeapi.ExecR } // Attach prepares a streaming endpoint to attach to a running container, and returns the address. -func (r *remoteRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (r *remoteRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Attach", "containerID", req.ContainerId, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -849,9 +850,9 @@ func (r *remoteRuntimeService) attachV1(ctx context.Context, req *runtimeapi.Att } // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. -func (r *remoteRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (r *remoteRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] PortForward", "podSandboxID", req.PodSandboxId, "port", req.Port, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -900,9 +901,9 @@ func (r *remoteRuntimeService) portForwardV1(ctx context.Context, req *runtimeap // UpdateRuntimeConfig updates the config of a runtime service. The only // update payload currently supported is the pod CIDR assigned to a node, // and the runtime service just proxies it down to the network plugin. -func (r *remoteRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) (err error) { +func (r *remoteRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] UpdateRuntimeConfig", "runtimeConfig", runtimeConfig, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() // Response doesn't contain anything of interest. This translates to an @@ -927,9 +928,9 @@ func (r *remoteRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeC } // Status returns the status of the runtime. -func (r *remoteRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) { +func (r *remoteRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) { klog.V(10).InfoS("[RemoteRuntimeService] Status", "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -982,9 +983,9 @@ func (r *remoteRuntimeService) statusV1(ctx context.Context, verbose bool) (*run } // ContainerStats returns the stats of the container. 
-func (r *remoteRuntimeService) ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) { +func (r *remoteRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ContainerStats", "containerID", containerID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -1027,11 +1028,11 @@ func (r *remoteRuntimeService) containerStatsV1(ctx context.Context, containerID } // ListContainerStats returns the list of ContainerStats given the filter. -func (r *remoteRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (r *remoteRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListContainerStats", "filter", filter) // Do not set timeout, because writable layer stats collection takes time. // TODO(random-liu): Should we assume runtime should cache the result, and set timeout here? - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := getContextWithCancel() defer cancel() if r.useV1API() { @@ -1068,9 +1069,9 @@ func (r *remoteRuntimeService) listContainerStatsV1(ctx context.Context, filter } // PodSandboxStats returns the stats of the pod. -func (r *remoteRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) { +func (r *remoteRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] PodSandboxStats", "podSandboxID", podSandboxID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -1113,10 +1114,10 @@ func (r *remoteRuntimeService) podSandboxStatsV1(ctx context.Context, podSandbox } // ListPodSandboxStats returns the list of pod sandbox stats given the filter -func (r *remoteRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { +func (r *remoteRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { klog.V(10).InfoS("[RemoteRuntimeService] ListPodSandboxStats", "filter", filter) // Set timeout, because runtimes are able to cache disk stats results - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -1153,9 +1154,9 @@ func (r *remoteRuntimeService) listPodSandboxStatsV1(ctx context.Context, filter } // ReopenContainerLog reopens the container log file. 
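Taken together, the remote_runtime.go hunks above restore a per-call deadline policy rather than a single rule: RunPodSandbox doubles the base timeout, StopContainer adds the stop grace period on top of it, ListContainerStats and ImageFsInfo get a cancel-only context because collecting writable-layer and filesystem stats can be slow, and everything else uses the plain base timeout. A compact sketch that writes that policy down in one place; the helper and its string keys are illustrative only, not code from this diff:

package remote // hypothetical sketch

import (
	"context"
	"time"
)

// contextForCall mirrors the per-call policy visible above: extra headroom
// for sandbox creation, the grace period added for container stop, and no
// deadline at all for the slow stats collections.
func contextForCall(call string, base, grace time.Duration) (context.Context, context.CancelFunc) {
	switch call {
	case "RunPodSandbox":
		return context.WithTimeout(context.Background(), 2*base)
	case "StopContainer":
		return context.WithTimeout(context.Background(), base+grace)
	case "ListContainerStats", "ImageFsInfo":
		return context.WithCancel(context.Background())
	default:
		return context.WithTimeout(context.Background(), base)
	}
}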
-func (r *remoteRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) (err error) { +func (r *remoteRuntimeService) ReopenContainerLog(containerID string) (err error) { klog.V(10).InfoS("[RemoteRuntimeService] ReopenContainerLog", "containerID", containerID, "timeout", r.timeout) - ctx, cancel := context.WithTimeout(ctx, r.timeout) + ctx, cancel := getContextWithTimeout(r.timeout) defer cancel() if r.useV1API() { @@ -1173,7 +1174,7 @@ func (r *remoteRuntimeService) ReopenContainerLog(ctx context.Context, container } // CheckpointContainer triggers a checkpoint of the given CheckpointContainerRequest -func (r *remoteRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error { +func (r *remoteRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { klog.V(10).InfoS( "[RemoteRuntimeService] CheckpointContainer", "options", @@ -1190,18 +1191,18 @@ func (r *remoteRuntimeService) CheckpointContainer(ctx context.Context, options return errors.New("CheckpointContainer requires the timeout value to be > 0") } - ctx, cancel := func(ctx context.Context) (context.Context, context.CancelFunc) { + ctx, cancel := func() (context.Context, context.CancelFunc) { defaultTimeout := int64(r.timeout / time.Second) if options.Timeout > defaultTimeout { // The user requested a specific timeout, let's use that if it // is larger than the CRI default. - return context.WithTimeout(ctx, time.Duration(options.Timeout)*time.Second) + return getContextWithTimeout(time.Duration(options.Timeout) * time.Second) } // If the user requested a timeout less than the // CRI default, let's use the CRI default. options.Timeout = defaultTimeout - return context.WithTimeout(ctx, r.timeout) - }(ctx) + return getContextWithTimeout(r.timeout) + }() defer cancel() _, err := r.runtimeClient.CheckpointContainer( diff --git a/pkg/kubelet/cri/remote/remote_runtime_test.go b/pkg/kubelet/cri/remote/remote_runtime_test.go index 4ece13745a7..5219c1ca8b1 100644 --- a/pkg/kubelet/cri/remote/remote_runtime_test.go +++ b/pkg/kubelet/cri/remote/remote_runtime_test.go @@ -87,7 +87,7 @@ func TestGetSpans(t *testing.T) { ) ctx := context.Background() rtSvc := createRemoteRuntimeServiceWithTracerProvider(endpoint, tp, t) - _, err := rtSvc.Version(ctx, apitest.FakeVersion) + _, err := rtSvc.Version(apitest.FakeVersion) require.NoError(t, err) err = tp.ForceFlush(ctx) require.NoError(t, err) @@ -106,9 +106,8 @@ func TestVersion(t *testing.T) { } }() - ctx := context.Background() rtSvc := createRemoteRuntimeService(endpoint, t) - version, err := rtSvc.Version(ctx, apitest.FakeVersion) + version, err := rtSvc.Version(apitest.FakeVersion) require.NoError(t, err) assert.Equal(t, apitest.FakeVersion, version.Version) assert.Equal(t, apitest.FakeRuntimeName, version.RuntimeName) diff --git a/pkg/kubelet/cri/remote/utils.go b/pkg/kubelet/cri/remote/utils.go index ac3f35e45ad..8497d5d2c9c 100644 --- a/pkg/kubelet/cri/remote/utils.go +++ b/pkg/kubelet/cri/remote/utils.go @@ -17,7 +17,9 @@ limitations under the License. package remote import ( + "context" "fmt" + "time" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ) @@ -26,6 +28,16 @@ import ( // grpc library default is 4MB const maxMsgSize = 1024 * 1024 * 16 +// getContextWithTimeout returns a context with timeout. 
+func getContextWithTimeout(timeout time.Duration) (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), timeout) +} + +// getContextWithCancel returns a context with cancel. +func getContextWithCancel() (context.Context, context.CancelFunc) { + return context.WithCancel(context.Background()) +} + // verifySandboxStatus verified whether all required fields are set in PodSandboxStatus. func verifySandboxStatus(status *runtimeapi.PodSandboxStatus) error { if status.Id == "" { diff --git a/pkg/kubelet/cri/streaming/portforward/httpstream.go b/pkg/kubelet/cri/streaming/portforward/httpstream.go index a45131081a8..5b0016c3c2f 100644 --- a/pkg/kubelet/cri/streaming/portforward/httpstream.go +++ b/pkg/kubelet/cri/streaming/portforward/httpstream.go @@ -17,7 +17,6 @@ limitations under the License. package portforward import ( - "context" "errors" "fmt" "net/http" @@ -241,7 +240,6 @@ Loop: // portForward invokes the httpStreamHandler's forwarder.PortForward // function for the given stream pair. func (h *httpStreamHandler) portForward(p *httpStreamPair) { - ctx := context.Background() defer p.dataStream.Close() defer p.errorStream.Close() @@ -249,7 +247,7 @@ func (h *httpStreamHandler) portForward(p *httpStreamPair) { port, _ := strconv.ParseInt(portString, 10, 32) klog.V(5).InfoS("Connection request invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString) - err := h.forwarder.PortForward(ctx, h.pod, h.uid, int32(port), p.dataStream) + err := h.forwarder.PortForward(h.pod, h.uid, int32(port), p.dataStream) klog.V(5).InfoS("Connection request done invoking forwarder.PortForward for port", "connection", h.conn, "request", p.requestID, "port", portString) if err != nil { diff --git a/pkg/kubelet/cri/streaming/portforward/portforward.go b/pkg/kubelet/cri/streaming/portforward/portforward.go index df0fe5a8e08..905fc8a7822 100644 --- a/pkg/kubelet/cri/streaming/portforward/portforward.go +++ b/pkg/kubelet/cri/streaming/portforward/portforward.go @@ -17,7 +17,6 @@ limitations under the License. package portforward import ( - "context" "io" "net/http" "time" @@ -31,7 +30,7 @@ import ( // in a pod. type PortForwarder interface { // PortForwarder copies data between a data stream and a port in a pod. - PortForward(ctx context.Context, name string, uid types.UID, port int32, stream io.ReadWriteCloser) error + PortForward(name string, uid types.UID, port int32, stream io.ReadWriteCloser) error } // ServePortForward handles a port forwarding request. A single request is diff --git a/pkg/kubelet/cri/streaming/portforward/websocket.go b/pkg/kubelet/cri/streaming/portforward/websocket.go index cbedb5b6c98..d4ac2a30a94 100644 --- a/pkg/kubelet/cri/streaming/portforward/websocket.go +++ b/pkg/kubelet/cri/streaming/portforward/websocket.go @@ -17,7 +17,6 @@ limitations under the License. 
package portforward import ( - "context" "encoding/binary" "fmt" "io" @@ -183,12 +182,11 @@ func (h *websocketStreamHandler) run() { } func (h *websocketStreamHandler) portForward(p *websocketStreamPair) { - ctx := context.Background() defer p.dataStream.Close() defer p.errorStream.Close() klog.V(5).InfoS("Connection invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port) - err := h.forwarder.PortForward(ctx, h.pod, h.uid, p.port, p.dataStream) + err := h.forwarder.PortForward(h.pod, h.uid, p.port, p.dataStream) klog.V(5).InfoS("Connection done invoking forwarder.PortForward for port", "connection", h.conn, "port", p.port) if err != nil { diff --git a/pkg/kubelet/cri/streaming/remotecommand/attach.go b/pkg/kubelet/cri/streaming/remotecommand/attach.go index aa638499a95..e266f34fef4 100644 --- a/pkg/kubelet/cri/streaming/remotecommand/attach.go +++ b/pkg/kubelet/cri/streaming/remotecommand/attach.go @@ -17,7 +17,6 @@ limitations under the License. package remotecommand import ( - "context" "fmt" "io" "net/http" @@ -34,7 +33,7 @@ import ( type Attacher interface { // AttachContainer attaches to the running container in the pod, copying data between in/out/err // and the container's stdin/stdout/stderr. - AttachContainer(ctx context.Context, name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + AttachContainer(name string, uid types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error } // ServeAttach handles requests to attach to a container. After creating/receiving the required @@ -47,7 +46,7 @@ func ServeAttach(w http.ResponseWriter, req *http.Request, attacher Attacher, po } defer ctx.conn.Close() - err := attacher.AttachContainer(req.Context(), podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan) + err := attacher.AttachContainer(podName, uid, container, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan) if err != nil { err = fmt.Errorf("error attaching to container: %v", err) runtime.HandleError(err) diff --git a/pkg/kubelet/cri/streaming/remotecommand/exec.go b/pkg/kubelet/cri/streaming/remotecommand/exec.go index 5ec6b86a8d0..8eaf82a6fe4 100644 --- a/pkg/kubelet/cri/streaming/remotecommand/exec.go +++ b/pkg/kubelet/cri/streaming/remotecommand/exec.go @@ -17,7 +17,6 @@ limitations under the License. package remotecommand import ( - "context" "fmt" "io" "net/http" @@ -36,7 +35,7 @@ import ( type Executor interface { // ExecInContainer executes a command in a container in the pod, copying data // between in/out/err and the container's stdin/stdout/stderr. - ExecInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error + ExecInContainer(name string, uid types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error } // ServeExec handles requests to execute a command in a container. 
After @@ -50,7 +49,7 @@ func ServeExec(w http.ResponseWriter, req *http.Request, executor Executor, podN } defer ctx.conn.Close() - err := executor.ExecInContainer(req.Context(), podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0) + err := executor.ExecInContainer(podName, uid, container, cmd, ctx.stdinStream, ctx.stdoutStream, ctx.stderrStream, ctx.tty, ctx.resizeChan, 0) if err != nil { if exitErr, ok := err.(utilexec.ExitError); ok && exitErr.Exited() { rc := exitErr.ExitStatus() diff --git a/pkg/kubelet/cri/streaming/remotecommand/httpstream.go b/pkg/kubelet/cri/streaming/remotecommand/httpstream.go index 8c18b2e7247..c1054152d35 100644 --- a/pkg/kubelet/cri/streaming/remotecommand/httpstream.go +++ b/pkg/kubelet/cri/streaming/remotecommand/httpstream.go @@ -70,9 +70,9 @@ func NewOptions(req *http.Request) (*Options, error) { }, nil } -// connectionContext contains the connection and streams used when +// context contains the connection and streams used when // forwarding an attach or execute session into a container. -type connectionContext struct { +type context struct { conn io.Closer stdinStream io.ReadCloser stdoutStream io.WriteCloser @@ -102,8 +102,8 @@ func waitStreamReply(replySent <-chan struct{}, notify chan<- struct{}, stop <-c } } -func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*connectionContext, bool) { - var ctx *connectionContext +func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { + var ctx *context var ok bool if wsstream.IsWebSocketRequest(req) { ctx, ok = createWebSocketStreams(req, w, opts, idleTimeout) @@ -122,7 +122,7 @@ func createStreams(req *http.Request, w http.ResponseWriter, opts *Options, supp return ctx, true } -func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*connectionContext, bool) { +func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Options, supportedStreamProtocols []string, idleTimeout, streamCreationTimeout time.Duration) (*context, bool) { protocol, err := httpstream.Handshake(req, w, supportedStreamProtocols) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) @@ -194,7 +194,7 @@ func createHTTPStreamStreams(req *http.Request, w http.ResponseWriter, opts *Opt type protocolHandler interface { // waitForStreams waits for the expected streams or a timeout, returning a // remoteCommandContext if all the streams were received, or an error if not. - waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) + waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) // supportsTerminalResizing returns true if the protocol handler supports terminal resizing supportsTerminalResizing() bool } @@ -204,8 +204,8 @@ type protocolHandler interface { // the process' exit code. 
type v4ProtocolHandler struct{} -func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) { - ctx := &connectionContext{} +func (*v4ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} receivedStreams := 0 replyChan := make(chan struct{}) stop := make(chan struct{}) @@ -255,8 +255,8 @@ func (*v4ProtocolHandler) supportsTerminalResizing() bool { return true } // v3ProtocolHandler implements the V3 protocol version for streaming command execution. type v3ProtocolHandler struct{} -func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) { - ctx := &connectionContext{} +func (*v3ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} receivedStreams := 0 replyChan := make(chan struct{}) stop := make(chan struct{}) @@ -306,8 +306,8 @@ func (*v3ProtocolHandler) supportsTerminalResizing() bool { return true } // v2ProtocolHandler implements the V2 protocol version for streaming command execution. type v2ProtocolHandler struct{} -func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) { - ctx := &connectionContext{} +func (*v2ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} receivedStreams := 0 replyChan := make(chan struct{}) stop := make(chan struct{}) @@ -354,8 +354,8 @@ func (*v2ProtocolHandler) supportsTerminalResizing() bool { return false } // v1ProtocolHandler implements the V1 protocol version for streaming command execution. type v1ProtocolHandler struct{} -func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*connectionContext, error) { - ctx := &connectionContext{} +func (*v1ProtocolHandler) waitForStreams(streams <-chan streamAndReply, expectedStreams int, expired <-chan time.Time) (*context, error) { + ctx := &context{} receivedStreams := 0 replyChan := make(chan struct{}) stop := make(chan struct{}) diff --git a/pkg/kubelet/cri/streaming/remotecommand/websocket.go b/pkg/kubelet/cri/streaming/remotecommand/websocket.go index a81d2259bda..815a7ecf6e9 100644 --- a/pkg/kubelet/cri/streaming/remotecommand/websocket.go +++ b/pkg/kubelet/cri/streaming/remotecommand/websocket.go @@ -68,9 +68,9 @@ func writeChannel(real bool) wsstream.ChannelType { return wsstream.IgnoreChannel } -// createWebSocketStreams returns a connectionContext containing the websocket connection and +// createWebSocketStreams returns a context containing the websocket connection and // streams needed to perform an exec or an attach. 
-func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*connectionContext, bool) { +func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Options, idleTimeout time.Duration) (*context, bool) { channels := createChannels(opts) conn := wsstream.NewConn(map[string]wsstream.ChannelProtocolConfig{ "": { @@ -112,7 +112,7 @@ func createWebSocketStreams(req *http.Request, w http.ResponseWriter, opts *Opti streams[errorChannel].Write([]byte{}) } - ctx := &connectionContext{ + ctx := &context{ conn: conn, stdinStream: streams[stdinChannel], stdoutStream: streams[stdoutChannel], diff --git a/pkg/kubelet/cri/streaming/server.go b/pkg/kubelet/cri/streaming/server.go index 3e989d8aee9..7a50673f187 100644 --- a/pkg/kubelet/cri/streaming/server.go +++ b/pkg/kubelet/cri/streaming/server.go @@ -17,7 +17,6 @@ limitations under the License. package streaming import ( - "context" "crypto/tls" "errors" "io" @@ -62,9 +61,9 @@ type Server interface { // Runtime is the interface to execute the commands and provide the streams. type Runtime interface { - Exec(ctx context.Context, containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error - Attach(ctx context.Context, containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error - PortForward(ctx context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error + Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + Attach(containerID string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error + PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error } // Config defines the options used for running the stream server. 
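With the ctx parameter gone from the streaming Runtime interface above, an implementation only needs the container or sandbox ID plus the streams. As a minimal sketch (not part of this patch; noopRuntime and its behavior are illustrative only, and it assumes the remotecommand import path already used by server.go; the real in-tree analogue is the fakeRuntime in server_test.go further down):

package streaming

import (
	"io"

	"k8s.io/client-go/tools/remotecommand"
)

// noopRuntime is an illustrative stub that satisfies the context-free
// Runtime interface shown in the hunk above.
type noopRuntime struct{}

func (noopRuntime) Exec(containerID string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
	// A real runtime would exec cmd in the container and wire up the streams.
	return nil
}

func (noopRuntime) Attach(containerID string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
	return nil
}

func (noopRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
	// Close the stream immediately; a real runtime would proxy it into the sandbox.
	return stream.Close()
}

// Compile-time check that the stub matches the trimmed interface.
var _ Runtime = noopRuntime{}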
@@ -370,14 +369,14 @@ var _ remotecommandserver.Executor = &criAdapter{} var _ remotecommandserver.Attacher = &criAdapter{} var _ portforward.PortForwarder = &criAdapter{} -func (a *criAdapter) ExecInContainer(ctx context.Context, podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { - return a.Runtime.Exec(ctx, container, cmd, in, out, err, tty, resize) +func (a *criAdapter) ExecInContainer(podName string, podUID types.UID, container string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error { + return a.Runtime.Exec(container, cmd, in, out, err, tty, resize) } -func (a *criAdapter) AttachContainer(ctx context.Context, podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { - return a.Runtime.Attach(ctx, container, in, out, err, tty, resize) +func (a *criAdapter) AttachContainer(podName string, podUID types.UID, container string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { + return a.Runtime.Attach(container, in, out, err, tty, resize) } -func (a *criAdapter) PortForward(ctx context.Context, podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { - return a.Runtime.PortForward(ctx, podName, port, stream) +func (a *criAdapter) PortForward(podName string, podUID types.UID, port int32, stream io.ReadWriteCloser) error { + return a.Runtime.PortForward(podName, port, stream) } diff --git a/pkg/kubelet/cri/streaming/server_test.go b/pkg/kubelet/cri/streaming/server_test.go index 92ab77c03c3..156c45d8626 100644 --- a/pkg/kubelet/cri/streaming/server_test.go +++ b/pkg/kubelet/cri/streaming/server_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package streaming import ( - "context" "crypto/tls" "io" "net/http" @@ -414,19 +413,19 @@ type fakeRuntime struct { t *testing.T } -func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { +func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { assert.Equal(f.t, testContainerID, containerID) doServerStreams(f.t, "exec", stdin, stdout, stderr) return nil } -func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { +func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { assert.Equal(f.t, testContainerID, containerID) doServerStreams(f.t, "attach", stdin, stdout, stderr) return nil } -func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error { +func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { assert.Equal(f.t, testPodSandboxID, podSandboxID) assert.EqualValues(f.t, testPort, port) doServerStreams(f.t, "portforward", stream, stream, nil) diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go index 03ed32ba844..8332a522e25 100644 --- a/pkg/kubelet/eviction/eviction_manager.go +++ b/pkg/kubelet/eviction/eviction_manager.go @@ -17,7 +17,6 @@ limitations under the License. package eviction import ( - "context" "fmt" "sort" "sync" @@ -231,7 +230,6 @@ func (m *managerImpl) IsUnderPIDPressure() bool { // synchronize is the main control loop that enforces eviction thresholds. // Returns the pod that was killed, or nil if no pod was killed. func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) []*v1.Pod { - ctx := context.Background() // if we have nothing to do, just return thresholds := m.config.Thresholds if len(thresholds) == 0 && !m.localStorageCapacityIsolation { @@ -242,7 +240,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act // build the ranking functions (if not yet known) // TODO: have a function in cadvisor that lets us know if global housekeeping has completed if m.dedicatedImageFs == nil { - hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs(ctx) + hasImageFs, ok := diskInfoProvider.HasDedicatedImageFs() if ok != nil { return nil } @@ -253,7 +251,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act activePods := podFunc() updateStats := true - summary, err := m.summaryProvider.Get(ctx, updateStats) + summary, err := m.summaryProvider.Get(updateStats) if err != nil { klog.ErrorS(err, "Eviction manager: failed to get summary stats") return nil @@ -345,7 +343,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim) // check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods. 
- if m.reclaimNodeLevelResources(ctx, thresholdToReclaim.Signal, resourceToReclaim) { + if m.reclaimNodeLevelResources(thresholdToReclaim.Signal, resourceToReclaim) { klog.InfoS("Eviction manager: able to reduce resource pressure without evicting pods.", "resourceName", resourceToReclaim) return nil } @@ -420,17 +418,17 @@ func (m *managerImpl) waitForPodsCleanup(podCleanedUpFunc PodCleanedUpFunc, pods } // reclaimNodeLevelResources attempts to reclaim node level resources. returns true if thresholds were satisfied and no pod eviction is required. -func (m *managerImpl) reclaimNodeLevelResources(ctx context.Context, signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool { +func (m *managerImpl) reclaimNodeLevelResources(signalToReclaim evictionapi.Signal, resourceToReclaim v1.ResourceName) bool { nodeReclaimFuncs := m.signalToNodeReclaimFuncs[signalToReclaim] for _, nodeReclaimFunc := range nodeReclaimFuncs { // attempt to reclaim the pressured resource. - if err := nodeReclaimFunc(ctx); err != nil { + if err := nodeReclaimFunc(); err != nil { klog.InfoS("Eviction manager: unexpected error when attempting to reduce resource pressure", "resourceName", resourceToReclaim, "err", err) } } if len(nodeReclaimFuncs) > 0 { - summary, err := m.summaryProvider.Get(ctx, true) + summary, err := m.summaryProvider.Get(true) if err != nil { klog.ErrorS(err, "Eviction manager: failed to get summary stats after resource reclaim") return false diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go index 578d586282c..6bb54e0ab61 100644 --- a/pkg/kubelet/eviction/eviction_manager_test.go +++ b/pkg/kubelet/eviction/eviction_manager_test.go @@ -17,7 +17,6 @@ limitations under the License. package eviction import ( - "context" "fmt" "testing" "time" @@ -68,7 +67,7 @@ type mockDiskInfoProvider struct { } // HasDedicatedImageFs returns the mocked value -func (m *mockDiskInfoProvider) HasDedicatedImageFs(_ context.Context) (bool, error) { +func (m *mockDiskInfoProvider) HasDedicatedImageFs() (bool, error) { return m.dedicatedImageFs, nil } @@ -82,7 +81,7 @@ type mockDiskGC struct { } // DeleteUnusedImages returns the mocked values. -func (m *mockDiskGC) DeleteUnusedImages(_ context.Context) error { +func (m *mockDiskGC) DeleteUnusedImages() error { m.imageGCInvoked = true if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil { m.fakeSummaryProvider.result = m.summaryAfterGC @@ -91,7 +90,7 @@ func (m *mockDiskGC) DeleteUnusedImages(_ context.Context) error { } // DeleteAllUnusedContainers returns the mocked value -func (m *mockDiskGC) DeleteAllUnusedContainers(_ context.Context) error { +func (m *mockDiskGC) DeleteAllUnusedContainers() error { m.containerGCInvoked = true if m.summaryAfterGC != nil && m.fakeSummaryProvider != nil { m.fakeSummaryProvider.result = m.summaryAfterGC diff --git a/pkg/kubelet/eviction/helpers_test.go b/pkg/kubelet/eviction/helpers_test.go index 352ba3c1942..46ba5500163 100644 --- a/pkg/kubelet/eviction/helpers_test.go +++ b/pkg/kubelet/eviction/helpers_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package eviction import ( - "context" "fmt" "reflect" "sort" @@ -1186,11 +1185,11 @@ type fakeSummaryProvider struct { result *statsapi.Summary } -func (f *fakeSummaryProvider) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) { +func (f *fakeSummaryProvider) Get(updateStats bool) (*statsapi.Summary, error) { return f.result, nil } -func (f *fakeSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) { +func (f *fakeSummaryProvider) GetCPUAndMemoryStats() (*statsapi.Summary, error) { return f.result, nil } diff --git a/pkg/kubelet/eviction/mock_threshold_notifier_test.go b/pkg/kubelet/eviction/mock_threshold_notifier_test.go index 341b7c8b1f4..adc8b47f65e 100644 --- a/pkg/kubelet/eviction/mock_threshold_notifier_test.go +++ b/pkg/kubelet/eviction/mock_threshold_notifier_test.go @@ -21,7 +21,6 @@ limitations under the License. package eviction import ( - context "context" reflect "reflect" time "time" @@ -130,18 +129,18 @@ func (m *MockDiskInfoProvider) EXPECT() *MockDiskInfoProviderMockRecorder { } // HasDedicatedImageFs mocks base method. -func (m *MockDiskInfoProvider) HasDedicatedImageFs(ctx context.Context) (bool, error) { +func (m *MockDiskInfoProvider) HasDedicatedImageFs() (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasDedicatedImageFs", ctx) + ret := m.ctrl.Call(m, "HasDedicatedImageFs") ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // HasDedicatedImageFs indicates an expected call of HasDedicatedImageFs. -func (mr *MockDiskInfoProviderMockRecorder) HasDedicatedImageFs(ctx interface{}) *gomock.Call { +func (mr *MockDiskInfoProviderMockRecorder) HasDedicatedImageFs() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasDedicatedImageFs", reflect.TypeOf((*MockDiskInfoProvider)(nil).HasDedicatedImageFs), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasDedicatedImageFs", reflect.TypeOf((*MockDiskInfoProvider)(nil).HasDedicatedImageFs)) } // MockImageGC is a mock of ImageGC interface. @@ -168,17 +167,17 @@ func (m *MockImageGC) EXPECT() *MockImageGCMockRecorder { } // DeleteUnusedImages mocks base method. -func (m *MockImageGC) DeleteUnusedImages(ctx context.Context) error { +func (m *MockImageGC) DeleteUnusedImages() error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteUnusedImages", ctx) + ret := m.ctrl.Call(m, "DeleteUnusedImages") ret0, _ := ret[0].(error) return ret0 } // DeleteUnusedImages indicates an expected call of DeleteUnusedImages. -func (mr *MockImageGCMockRecorder) DeleteUnusedImages(ctx interface{}) *gomock.Call { +func (mr *MockImageGCMockRecorder) DeleteUnusedImages() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnusedImages", reflect.TypeOf((*MockImageGC)(nil).DeleteUnusedImages), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUnusedImages", reflect.TypeOf((*MockImageGC)(nil).DeleteUnusedImages)) } // MockContainerGC is a mock of ContainerGC interface. @@ -205,17 +204,17 @@ func (m *MockContainerGC) EXPECT() *MockContainerGCMockRecorder { } // DeleteAllUnusedContainers mocks base method. 
-func (m *MockContainerGC) DeleteAllUnusedContainers(ctx context.Context) error { +func (m *MockContainerGC) DeleteAllUnusedContainers() error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DeleteAllUnusedContainers", ctx) + ret := m.ctrl.Call(m, "DeleteAllUnusedContainers") ret0, _ := ret[0].(error) return ret0 } // DeleteAllUnusedContainers indicates an expected call of DeleteAllUnusedContainers. -func (mr *MockContainerGCMockRecorder) DeleteAllUnusedContainers(ctx interface{}) *gomock.Call { +func (mr *MockContainerGCMockRecorder) DeleteAllUnusedContainers() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllUnusedContainers", reflect.TypeOf((*MockContainerGC)(nil).DeleteAllUnusedContainers), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAllUnusedContainers", reflect.TypeOf((*MockContainerGC)(nil).DeleteAllUnusedContainers)) } // MockCgroupNotifier is a mock of CgroupNotifier interface. diff --git a/pkg/kubelet/eviction/types.go b/pkg/kubelet/eviction/types.go index d0f7a7403d5..4cfb4e1d95d 100644 --- a/pkg/kubelet/eviction/types.go +++ b/pkg/kubelet/eviction/types.go @@ -18,7 +18,6 @@ limitations under the License. package eviction import ( - "context" "time" v1 "k8s.io/api/core/v1" @@ -72,19 +71,19 @@ type Manager interface { // DiskInfoProvider is responsible for informing the manager how disk is configured. type DiskInfoProvider interface { // HasDedicatedImageFs returns true if the imagefs is on a separate device from the rootfs. - HasDedicatedImageFs(ctx context.Context) (bool, error) + HasDedicatedImageFs() (bool, error) } // ImageGC is responsible for performing garbage collection of unused images. type ImageGC interface { // DeleteUnusedImages deletes unused images. - DeleteUnusedImages(ctx context.Context) error + DeleteUnusedImages() error } // ContainerGC is responsible for performing garbage collection of unused containers. type ContainerGC interface { // DeleteAllUnusedContainers deletes all unused containers, even those that belong to pods that are terminated, but not deleted. - DeleteAllUnusedContainers(ctx context.Context) error + DeleteAllUnusedContainers() error } // KillPodFunc kills a pod. @@ -132,7 +131,7 @@ type thresholdsObservedAt map[evictionapi.Threshold]time.Time type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time // nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods. -type nodeReclaimFunc func(ctx context.Context) error +type nodeReclaimFunc func() error // nodeReclaimFuncs is an ordered list of nodeReclaimFunc type nodeReclaimFuncs []nodeReclaimFunc diff --git a/pkg/kubelet/images/helpers.go b/pkg/kubelet/images/helpers.go index b8005d14f17..7bd18bd0524 100644 --- a/pkg/kubelet/images/helpers.go +++ b/pkg/kubelet/images/helpers.go @@ -17,10 +17,9 @@ limitations under the License. 
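One practical effect of dropping ctx from the eviction interfaces in the pkg/kubelet/eviction/types.go hunk above is that the GC methods once again match nodeReclaimFunc exactly, so their method values can be used as reclaim functions without wrapper closures. A rough wiring sketch (illustrative only; exampleReclaimFuncs is a hypothetical helper, not part of this patch, and it assumes it lives in package eviction next to the definitions shown above):

// exampleReclaimFuncs shows that the context-free GC methods are directly
// assignable to nodeReclaimFunc (func() error).
func exampleReclaimFuncs(containerGC ContainerGC, imageGC ImageGC) nodeReclaimFuncs {
	return nodeReclaimFuncs{
		containerGC.DeleteAllUnusedContainers, // method value has type func() error
		imageGC.DeleteUnusedImages,            // likewise matches nodeReclaimFunc
	}
}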
package images import ( - "context" "fmt" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/client-go/util/flowcontrol" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -44,9 +43,9 @@ type throttledImageService struct { limiter flowcontrol.RateLimiter } -func (ts throttledImageService) PullImage(ctx context.Context, image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { if ts.limiter.TryAccept() { - return ts.ImageService.PullImage(ctx, image, secrets, podSandboxConfig) + return ts.ImageService.PullImage(image, secrets, podSandboxConfig) } return "", fmt.Errorf("pull QPS exceeded") } diff --git a/pkg/kubelet/images/image_gc_manager.go b/pkg/kubelet/images/image_gc_manager.go index d284ed32f13..517f3ab7ae5 100644 --- a/pkg/kubelet/images/image_gc_manager.go +++ b/pkg/kubelet/images/image_gc_manager.go @@ -17,7 +17,6 @@ limitations under the License. package images import ( - "context" goerrors "errors" "fmt" "math" @@ -25,9 +24,9 @@ import ( "sync" "time" - v1 "k8s.io/api/core/v1" "k8s.io/klog/v2" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" @@ -42,7 +41,7 @@ import ( // collection. type StatsProvider interface { // ImageFsStats returns the stats of the image filesystem. - ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) + ImageFsStats() (*statsapi.FsStats, error) } // ImageGCManager is an interface for managing lifecycle of all images. @@ -50,7 +49,7 @@ type StatsProvider interface { type ImageGCManager interface { // Applies the garbage collection policy. Errors include being unable to free // enough space as per the garbage collection policy. - GarbageCollect(ctx context.Context) error + GarbageCollect() error // Start async garbage collection of images. Start() @@ -58,7 +57,7 @@ type ImageGCManager interface { GetImageList() ([]container.Image, error) // Delete all unused images. - DeleteUnusedImages(ctx context.Context) error + DeleteUnusedImages() error } // ImageGCPolicy is a policy for garbage collecting images. Policy defines an allowed band in @@ -179,14 +178,13 @@ func NewImageGCManager(runtime container.Runtime, statsProvider StatsProvider, r } func (im *realImageGCManager) Start() { - ctx := context.Background() go wait.Until(func() { // Initial detection make detected time "unknown" in the past. var ts time.Time if im.initialized { ts = time.Now() } - _, err := im.detectImages(ctx, ts) + _, err := im.detectImages(ts) if err != nil { klog.InfoS("Failed to monitor images", "err", err) } else { @@ -196,7 +194,7 @@ func (im *realImageGCManager) Start() { // Start a goroutine periodically updates image cache. 
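For the throttled image service in the helpers.go hunk above, the wrapper simply rejects a pull whenever the token bucket is empty. A hedged construction sketch (newThrottledPuller is an illustrative name, not part of the patch; it assumes the kubecontainer and flowcontrol imports already present in that file and the embedded ImageService field implied by ts.ImageService.PullImage):

// newThrottledPuller wraps an ImageService with QPS limiting using the
// throttledImageService type from the hunk above. Illustrative only.
func newThrottledPuller(svc kubecontainer.ImageService, qps float32, burst int) kubecontainer.ImageService {
	if qps <= 0.0 || burst <= 0 {
		return svc // no throttling configured
	}
	return throttledImageService{
		ImageService: svc,
		limiter:      flowcontrol.NewTokenBucketRateLimiter(qps, burst),
	}
}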
go wait.Until(func() { - images, err := im.runtime.ListImages(ctx) + images, err := im.runtime.ListImages() if err != nil { klog.InfoS("Failed to update image list", "err", err) } else { @@ -211,20 +209,20 @@ func (im *realImageGCManager) GetImageList() ([]container.Image, error) { return im.imageCache.get(), nil } -func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time.Time) (sets.String, error) { +func (im *realImageGCManager) detectImages(detectTime time.Time) (sets.String, error) { imagesInUse := sets.NewString() // Always consider the container runtime pod sandbox image in use - imageRef, err := im.runtime.GetImageRef(ctx, container.ImageSpec{Image: im.sandboxImage}) + imageRef, err := im.runtime.GetImageRef(container.ImageSpec{Image: im.sandboxImage}) if err == nil && imageRef != "" { imagesInUse.Insert(imageRef) } - images, err := im.runtime.ListImages(ctx) + images, err := im.runtime.ListImages() if err != nil { return imagesInUse, err } - pods, err := im.runtime.GetPods(ctx, true) + pods, err := im.runtime.GetPods(true) if err != nil { return imagesInUse, err } @@ -278,9 +276,9 @@ func (im *realImageGCManager) detectImages(ctx context.Context, detectTime time. return imagesInUse, nil } -func (im *realImageGCManager) GarbageCollect(ctx context.Context) error { +func (im *realImageGCManager) GarbageCollect() error { // Get disk usage on disk holding images. - fsStats, err := im.statsProvider.ImageFsStats(ctx) + fsStats, err := im.statsProvider.ImageFsStats() if err != nil { return err } @@ -310,7 +308,7 @@ func (im *realImageGCManager) GarbageCollect(ctx context.Context) error { if usagePercent >= im.policy.HighThresholdPercent { amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available klog.InfoS("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold", "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent, "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent) - freed, err := im.freeSpace(ctx, amountToFree, time.Now()) + freed, err := im.freeSpace(amountToFree, time.Now()) if err != nil { return err } @@ -325,9 +323,9 @@ func (im *realImageGCManager) GarbageCollect(ctx context.Context) error { return nil } -func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error { +func (im *realImageGCManager) DeleteUnusedImages() error { klog.InfoS("Attempting to delete unused images") - _, err := im.freeSpace(ctx, math.MaxInt64, time.Now()) + _, err := im.freeSpace(math.MaxInt64, time.Now()) return err } @@ -337,8 +335,8 @@ func (im *realImageGCManager) DeleteUnusedImages(ctx context.Context) error { // bytes freed is always returned. // Note that error may be nil and the number of bytes free may be less // than bytesToFree. -func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, freeTime time.Time) (int64, error) { - imagesInUse, err := im.detectImages(ctx, freeTime) +func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) { + imagesInUse, err := im.detectImages(freeTime) if err != nil { return 0, err } @@ -387,7 +385,7 @@ func (im *realImageGCManager) freeSpace(ctx context.Context, bytesToFree int64, // Remove image. Continue despite errors. 
klog.InfoS("Removing image to free bytes", "imageID", image.id, "size", image.size) - err := im.runtime.RemoveImage(ctx, container.ImageSpec{Image: image.id}) + err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id}) if err != nil { deletionErrors = append(deletionErrors, err) continue diff --git a/pkg/kubelet/images/image_gc_manager_test.go b/pkg/kubelet/images/image_gc_manager_test.go index 2d2c08be387..af5a4d22c04 100644 --- a/pkg/kubelet/images/image_gc_manager_test.go +++ b/pkg/kubelet/images/image_gc_manager_test.go @@ -17,7 +17,6 @@ limitations under the License. package images import ( - "context" "fmt" "testing" "time" @@ -92,7 +91,6 @@ func makeContainer(id int) *container.Container { } func TestDetectImagesInitialDetect(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -121,7 +119,7 @@ func TestDetectImagesInitialDetect(t *testing.T) { } startTime := time.Now().Add(-time.Millisecond) - _, err := manager.detectImages(ctx, zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 3) @@ -140,7 +138,6 @@ func TestDetectImagesInitialDetect(t *testing.T) { } func TestDetectImagesWithNewImage(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -159,7 +156,7 @@ func TestDetectImagesWithNewImage(t *testing.T) { }}, } - _, err := manager.detectImages(ctx, zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) @@ -173,7 +170,7 @@ func TestDetectImagesWithNewImage(t *testing.T) { detectedTime := zero.Add(time.Second) startTime := time.Now().Add(-time.Millisecond) - _, err = manager.detectImages(ctx, detectedTime) + _, err = manager.detectImages(detectedTime) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 3) noContainer, ok := manager.getImageRecord(imageID(0)) @@ -191,7 +188,6 @@ func TestDetectImagesWithNewImage(t *testing.T) { } func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -204,14 +200,13 @@ func TestDeleteUnusedImagesExemptSandboxImage(t *testing.T) { }, } - err := manager.DeleteUnusedImages(ctx) + err := manager.DeleteUnusedImages() assert := assert.New(t) assert.Len(fakeRuntime.ImageList, 1) require.NoError(t, err) } func TestDeletePinnedImage(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -228,14 +223,13 @@ func TestDeletePinnedImage(t *testing.T) { }, } - err := manager.DeleteUnusedImages(ctx) + err := manager.DeleteUnusedImages() assert := assert.New(t) assert.Len(fakeRuntime.ImageList, 2) require.NoError(t, err) } func TestDoNotDeletePinnedImage(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -252,7 +246,7 @@ func TestDoNotDeletePinnedImage(t *testing.T) { }, } - spaceFreed, err := manager.freeSpace(ctx, 4096, time.Now()) + spaceFreed, err := manager.freeSpace(4096, time.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(1024, spaceFreed) @@ -260,7 +254,6 @@ func TestDoNotDeletePinnedImage(t *testing.T) 
{ } func TestDeleteUnPinnedImage(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -277,7 +270,7 @@ func TestDeleteUnPinnedImage(t *testing.T) { }, } - spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now()) + spaceFreed, err := manager.freeSpace(2048, time.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(2048, spaceFreed) @@ -285,7 +278,6 @@ func TestDeleteUnPinnedImage(t *testing.T) { } func TestAllPinnedImages(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -303,7 +295,7 @@ func TestAllPinnedImages(t *testing.T) { }, } - spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now()) + spaceFreed, err := manager.freeSpace(2048, time.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(0, spaceFreed) @@ -311,7 +303,6 @@ func TestAllPinnedImages(t *testing.T) { } func TestDetectImagesContainerStopped(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -329,7 +320,7 @@ func TestDetectImagesContainerStopped(t *testing.T) { }}, } - _, err := manager.detectImages(ctx, zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) @@ -338,7 +329,7 @@ func TestDetectImagesContainerStopped(t *testing.T) { // Simulate container being stopped. fakeRuntime.AllPodList = []*containertest.FakePod{} - _, err = manager.detectImages(ctx, time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) container1, ok := manager.getImageRecord(imageID(0)) @@ -352,7 +343,6 @@ func TestDetectImagesContainerStopped(t *testing.T) { } func TestDetectImagesWithRemovedImages(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -370,20 +360,19 @@ func TestDetectImagesWithRemovedImages(t *testing.T) { }}, } - _, err := manager.detectImages(ctx, zero) + _, err := manager.detectImages(zero) assert := assert.New(t) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 2) // Simulate both images being removed. 
fakeRuntime.ImageList = []container.Image{} - _, err = manager.detectImages(ctx, time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) assert.Equal(manager.imageRecordsLen(), 0) } func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -401,7 +390,7 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) { }}, } - spaceFreed, err := manager.freeSpace(ctx, 2048, time.Now()) + spaceFreed, err := manager.freeSpace(2048, time.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(1024, spaceFreed) @@ -409,7 +398,6 @@ func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) { } func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -428,14 +416,13 @@ func TestDeleteUnusedImagesRemoveAllUnusedImages(t *testing.T) { }}, } - err := manager.DeleteUnusedImages(ctx) + err := manager.DeleteUnusedImages() assert := assert.New(t) require.NoError(t, err) assert.Len(fakeRuntime.ImageList, 1) } func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -455,7 +442,7 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { } // Make 1 be more recently used than 0. - _, err := manager.detectImages(ctx, zero) + _, err := manager.detectImages(zero) require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ @@ -464,20 +451,20 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { }, }}, } - _, err = manager.detectImages(ctx, time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{ {Pod: &container.Pod{ Containers: []*container.Container{}, }}, } - _, err = manager.detectImages(ctx, time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) // We're setting the delete time one minute in the future, so the time the image // was first detected and the delete time are different. - spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now().Add(time.Minute)) + spaceFreed, err := manager.freeSpace(1024, time.Now().Add(time.Minute)) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(1024, spaceFreed) @@ -485,7 +472,6 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) { } func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) @@ -503,20 +489,20 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { } // Make 1 more recently detected but used at the same time as 0. 
- _, err := manager.detectImages(ctx, zero) + _, err := manager.detectImages(zero) require.NoError(t, err) fakeRuntime.ImageList = []container.Image{ makeImage(0, 1024), makeImage(1, 2048), } - _, err = manager.detectImages(ctx, time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) fakeRuntime.AllPodList = []*containertest.FakePod{} - _, err = manager.detectImages(ctx, time.Now()) + _, err = manager.detectImages(time.Now()) require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) - spaceFreed, err := manager.freeSpace(ctx, 1024, time.Now()) + spaceFreed, err := manager.freeSpace(1024, time.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(2048, spaceFreed) @@ -524,7 +510,6 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) { } func TestGarbageCollectBelowLowThreshold(t *testing.T) { - ctx := context.Background() policy := ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, @@ -535,16 +520,15 @@ func TestGarbageCollectBelowLowThreshold(t *testing.T) { manager, _ := newRealImageGCManager(policy, mockStatsProvider) // Expect 40% usage. - mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{ + mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{ AvailableBytes: uint64Ptr(600), CapacityBytes: uint64Ptr(1000), }, nil) - assert.NoError(t, manager.GarbageCollect(ctx)) + assert.NoError(t, manager.GarbageCollect()) } func TestGarbageCollectCadvisorFailure(t *testing.T) { - ctx := context.Background() policy := ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, @@ -554,12 +538,11 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) { mockStatsProvider := statstest.NewMockProvider(mockCtrl) manager, _ := newRealImageGCManager(policy, mockStatsProvider) - mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{}, fmt.Errorf("error")) - assert.NotNil(t, manager.GarbageCollect(ctx)) + mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{}, fmt.Errorf("error")) + assert.NotNil(t, manager.GarbageCollect()) } func TestGarbageCollectBelowSuccess(t *testing.T) { - ctx := context.Background() policy := ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, @@ -571,7 +554,7 @@ func TestGarbageCollectBelowSuccess(t *testing.T) { manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider) // Expect 95% usage and most of it gets freed. - mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{ + mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{ AvailableBytes: uint64Ptr(50), CapacityBytes: uint64Ptr(1000), }, nil) @@ -579,11 +562,10 @@ func TestGarbageCollectBelowSuccess(t *testing.T) { makeImage(0, 450), } - assert.NoError(t, manager.GarbageCollect(ctx)) + assert.NoError(t, manager.GarbageCollect()) } func TestGarbageCollectNotEnoughFreed(t *testing.T) { - ctx := context.Background() policy := ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, @@ -594,7 +576,7 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) { manager, fakeRuntime := newRealImageGCManager(policy, mockStatsProvider) // Expect 95% usage and little of it gets freed. 
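For reference, the arithmetic exercised by the GC threshold tests above and just below follows directly from the GarbageCollect hunk earlier in this patch: with ImageGCPolicy{HighThresholdPercent: 90, LowThresholdPercent: 80}, CapacityBytes 1000 and AvailableBytes 50 give 95% usage, which is over the 90% high threshold, so the manager tries to free capacity*(100-80)/100 - available = 200 - 50 = 150 bytes. TestGarbageCollectBelowSuccess offers a 450-byte unused image, so enough is freed and GarbageCollect returns nil, while TestGarbageCollectNotEnoughFreed only has a 50-byte image and therefore returns an error. TestGarbageCollectBelowLowThreshold (600 of 1000 bytes available, 40% usage) never reaches the reclaim path at all.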
- mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(&statsapi.FsStats{ + mockStatsProvider.EXPECT().ImageFsStats().Return(&statsapi.FsStats{ AvailableBytes: uint64Ptr(50), CapacityBytes: uint64Ptr(1000), }, nil) @@ -602,11 +584,10 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) { makeImage(0, 50), } - assert.NotNil(t, manager.GarbageCollect(ctx)) + assert.NotNil(t, manager.GarbageCollect()) } func TestGarbageCollectImageNotOldEnough(t *testing.T) { - ctx := context.Background() policy := ImageGCPolicy{ HighThresholdPercent: 90, LowThresholdPercent: 80, @@ -639,11 +620,11 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) { fakeClock := testingclock.NewFakeClock(time.Now()) t.Log(fakeClock.Now()) - _, err := manager.detectImages(ctx, fakeClock.Now()) + _, err := manager.detectImages(fakeClock.Now()) require.NoError(t, err) require.Equal(t, manager.imageRecordsLen(), 2) // no space freed since one image is in used, and another one is not old enough - spaceFreed, err := manager.freeSpace(ctx, 1024, fakeClock.Now()) + spaceFreed, err := manager.freeSpace(1024, fakeClock.Now()) assert := assert.New(t) require.NoError(t, err) assert.EqualValues(0, spaceFreed) @@ -651,7 +632,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) { // move clock by minAge duration, then 1 image will be garbage collected fakeClock.Step(policy.MinAge) - spaceFreed, err = manager.freeSpace(ctx, 1024, fakeClock.Now()) + spaceFreed, err = manager.freeSpace(1024, fakeClock.Now()) require.NoError(t, err) assert.EqualValues(1024, spaceFreed) assert.Len(fakeRuntime.ImageList, 1) diff --git a/pkg/kubelet/images/image_manager.go b/pkg/kubelet/images/image_manager.go index d897fa9c054..f60b14fb5dc 100644 --- a/pkg/kubelet/images/image_manager.go +++ b/pkg/kubelet/images/image_manager.go @@ -17,7 +17,6 @@ limitations under the License. package images import ( - "context" "fmt" "time" @@ -87,7 +86,7 @@ func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, // EnsureImageExists pulls the image for the specified pod and container, and returns // (imageRef, error message, error). 
-func (m *imageManager) EnsureImageExists(ctx context.Context, pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) { +func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) { logPrefix := fmt.Sprintf("%s/%s/%s", pod.Namespace, pod.Name, container.Image) ref, err := kubecontainer.GenerateContainerRef(pod, container) if err != nil { @@ -114,7 +113,7 @@ func (m *imageManager) EnsureImageExists(ctx context.Context, pod *v1.Pod, conta Image: image, Annotations: podAnnotations, } - imageRef, err := m.imageService.GetImageRef(ctx, spec) + imageRef, err := m.imageService.GetImageRef(spec) if err != nil { msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err) m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, klog.Warning) @@ -142,7 +141,7 @@ func (m *imageManager) EnsureImageExists(ctx context.Context, pod *v1.Pod, conta m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("Pulling image %q", container.Image), klog.Info) startTime := time.Now() pullChan := make(chan pullResult) - m.puller.pullImage(ctx, spec, pullSecrets, pullChan, podSandboxConfig) + m.puller.pullImage(spec, pullSecrets, pullChan, podSandboxConfig) imagePullResult := <-pullChan if imagePullResult.err != nil { m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, imagePullResult.err), klog.Warning) diff --git a/pkg/kubelet/images/image_manager_test.go b/pkg/kubelet/images/image_manager_test.go index 5d6a61f385b..daaba231c00 100644 --- a/pkg/kubelet/images/image_manager_test.go +++ b/pkg/kubelet/images/image_manager_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package images import ( - "context" "errors" "testing" "time" @@ -197,11 +196,10 @@ func TestParallelPuller(t *testing.T) { puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv) t.Run(c.testName, func(t *testing.T) { - ctx := context.Background() for _, expected := range c.expected { fakeRuntime.CalledFunctions = nil fakeClock.Step(time.Second) - _, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil) + _, _, err := puller.EnsureImageExists(pod, container, nil, nil) fakeRuntime.AssertCalls(expected.calls) assert.Equal(t, expected.err, err) } @@ -225,11 +223,10 @@ func TestSerializedPuller(t *testing.T) { puller, fakeClock, fakeRuntime, container := pullerTestEnv(c, useSerializedEnv) t.Run(c.testName, func(t *testing.T) { - ctx := context.Background() for _, expected := range c.expected { fakeRuntime.CalledFunctions = nil fakeClock.Step(time.Second) - _, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil) + _, _, err := puller.EnsureImageExists(pod, container, nil, nil) fakeRuntime.AssertCalls(expected.calls) assert.Equal(t, expected.err, err) } @@ -286,12 +283,11 @@ func TestPullAndListImageWithPodAnnotations(t *testing.T) { fakeClock.Step(time.Second) t.Run(c.testName, func(t *testing.T) { - ctx := context.Background() - _, _, err := puller.EnsureImageExists(ctx, pod, container, nil, nil) + _, _, err := puller.EnsureImageExists(pod, container, nil, nil) fakeRuntime.AssertCalls(c.expected[0].calls) assert.Equal(t, c.expected[0].err, err, "tick=%d", 0) - images, _ := fakeRuntime.ListImages(ctx) + images, _ := fakeRuntime.ListImages() assert.Equal(t, 1, len(images), "ListImages() count") image := images[0] diff --git a/pkg/kubelet/images/puller.go b/pkg/kubelet/images/puller.go index fe7e9fb042c..f081c2199c1 100644 --- a/pkg/kubelet/images/puller.go +++ b/pkg/kubelet/images/puller.go @@ -17,10 +17,9 @@ limitations under the License. 
package images import ( - "context" "time" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" @@ -32,7 +31,7 @@ type pullResult struct { } type imagePuller interface { - pullImage(context.Context, kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig) + pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- pullResult, *runtimeapi.PodSandboxConfig) } var _, _ imagePuller = ¶llelImagePuller{}, &serialImagePuller{} @@ -45,9 +44,9 @@ func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller return ¶llelImagePuller{imageService} } -func (pip *parallelImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { +func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { go func() { - imageRef, err := pip.imageService.PullImage(ctx, spec, pullSecrets, podSandboxConfig) + imageRef, err := pip.imageService.PullImage(spec, pullSecrets, podSandboxConfig) pullChan <- pullResult{ imageRef: imageRef, err: err, @@ -70,16 +69,14 @@ func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller { } type imagePullRequest struct { - ctx context.Context spec kubecontainer.ImageSpec pullSecrets []v1.Secret pullChan chan<- pullResult podSandboxConfig *runtimeapi.PodSandboxConfig } -func (sip *serialImagePuller) pullImage(ctx context.Context, spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { +func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, pullChan chan<- pullResult, podSandboxConfig *runtimeapi.PodSandboxConfig) { sip.pullRequests <- &imagePullRequest{ - ctx: ctx, spec: spec, pullSecrets: pullSecrets, pullChan: pullChan, @@ -89,7 +86,7 @@ func (sip *serialImagePuller) pullImage(ctx context.Context, spec kubecontainer. func (sip *serialImagePuller) processImagePullRequests() { for pullRequest := range sip.pullRequests { - imageRef, err := sip.imageService.PullImage(pullRequest.ctx, pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig) + imageRef, err := sip.imageService.PullImage(pullRequest.spec, pullRequest.pullSecrets, pullRequest.podSandboxConfig) pullRequest.pullChan <- pullResult{ imageRef: imageRef, err: err, diff --git a/pkg/kubelet/images/types.go b/pkg/kubelet/images/types.go index 3b0397faad4..2d40f97fd55 100644 --- a/pkg/kubelet/images/types.go +++ b/pkg/kubelet/images/types.go @@ -17,10 +17,9 @@ limitations under the License. package images import ( - "context" "errors" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" ) @@ -51,7 +50,7 @@ var ( // Implementations are expected to be thread safe. type ImageManager interface { // EnsureImageExists ensures that image specified in `container` exists. 
- EnsureImageExists(ctx context.Context, pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) + EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, string, error) // TODO(ronl): consolidating image managing and deleting operation in this interface } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 4f0b3ade380..bcafafec18a 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -197,7 +197,7 @@ type SyncHandler interface { HandlePodRemoves(pods []*v1.Pod) HandlePodReconcile(pods []*v1.Pod) HandlePodSyncs(pods []*v1.Pod) - HandlePodCleanups(ctx context.Context) error + HandlePodCleanups() error } // Option is a functional option type for Kubelet @@ -339,7 +339,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, nodeStatusMaxImages int32, seccompDefault bool, ) (*Kubelet, error) { - ctx := context.Background() logger := klog.TODO() if rootDirectory == "" { @@ -700,7 +699,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, klet.pleg = pleg.NewGenericPLEG(klet.containerRuntime, plegChannelCapacity, plegRelistPeriod, klet.podCache, clock.RealClock{}) klet.runtimeState = newRuntimeState(maxWaitForContainerRuntime) klet.runtimeState.addHealthCheck("PLEG", klet.pleg.Healthy) - if _, err := klet.updatePodCIDR(ctx, kubeCfg.PodCIDR); err != nil { + if _, err := klet.updatePodCIDR(kubeCfg.PodCIDR); err != nil { klog.ErrorS(err, "Pod CIDR update failed") } @@ -1121,7 +1120,7 @@ type Kubelet struct { clock clock.WithTicker // handlers called during the tryUpdateNodeStatus cycle - setNodeStatusFuncs []func(context.Context, *v1.Node) error + setNodeStatusFuncs []func(*v1.Node) error lastNodeUnschedulableLock sync.Mutex // maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus() @@ -1197,23 +1196,23 @@ type Kubelet struct { } // ListPodStats is delegated to StatsProvider, which implements stats.Provider interface -func (kl *Kubelet) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) { - return kl.StatsProvider.ListPodStats(ctx) +func (kl *Kubelet) ListPodStats() ([]statsapi.PodStats, error) { + return kl.StatsProvider.ListPodStats() } // ListPodCPUAndMemoryStats is delegated to StatsProvider, which implements stats.Provider interface -func (kl *Kubelet) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) { - return kl.StatsProvider.ListPodCPUAndMemoryStats(ctx) +func (kl *Kubelet) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { + return kl.StatsProvider.ListPodCPUAndMemoryStats() } // ListPodStatsAndUpdateCPUNanoCoreUsage is delegated to StatsProvider, which implements stats.Provider interface -func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) { - return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx) +func (kl *Kubelet) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { + return kl.StatsProvider.ListPodStatsAndUpdateCPUNanoCoreUsage() } // ImageFsStats is delegated to StatsProvider, which implements stats.Provider interface -func (kl *Kubelet) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) { - return kl.StatsProvider.ImageFsStats(ctx) +func (kl *Kubelet) ImageFsStats() (*statsapi.FsStats, error) { + return kl.StatsProvider.ImageFsStats() } // GetCgroupStats is delegated to StatsProvider, 
which implements stats.Provider interface @@ -1232,8 +1231,8 @@ func (kl *Kubelet) RootFsStats() (*statsapi.FsStats, error) { } // GetContainerInfo is delegated to StatsProvider, which implements stats.Provider interface -func (kl *Kubelet) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { - return kl.StatsProvider.GetContainerInfo(ctx, podFullName, uid, containerName, req) +func (kl *Kubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return kl.StatsProvider.GetContainerInfo(podFullName, uid, containerName, req) } // GetRawContainerInfo is delegated to StatsProvider, which implements stats.Provider interface @@ -1296,8 +1295,7 @@ func (kl *Kubelet) setupDataDirs() error { func (kl *Kubelet) StartGarbageCollection() { loggedContainerGCFailure := false go wait.Until(func() { - ctx := context.Background() - if err := kl.containerGC.GarbageCollect(ctx); err != nil { + if err := kl.containerGC.GarbageCollect(); err != nil { klog.ErrorS(err, "Container garbage collection failed") kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error()) loggedContainerGCFailure = true @@ -1320,8 +1318,7 @@ func (kl *Kubelet) StartGarbageCollection() { prevImageGCFailed := false go wait.Until(func() { - ctx := context.Background() - if err := kl.imageManager.GarbageCollect(ctx); err != nil { + if err := kl.imageManager.GarbageCollect(); err != nil { if prevImageGCFailed { klog.ErrorS(err, "Image garbage collection failed multiple times in a row") // Only create an event for repeated failures @@ -1433,7 +1430,6 @@ func (kl *Kubelet) initializeRuntimeDependentModules() { // Run starts the kubelet reacting to config updates func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { - ctx := context.Background() if kl.logServer == nil { kl.logServer = http.StripPrefix("/logs/", http.FileServer(http.Dir("/var/log/"))) } @@ -1482,7 +1478,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) { // Start the pod lifecycle event generator. 
kl.pleg.Start() - kl.syncLoop(ctx, updates, kl) + kl.syncLoop(updates, kl) } // syncPod is the transaction script for the sync of a single pod (setting up) @@ -1620,7 +1616,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType klog.V(2).InfoS("Pod is not runnable and must have running containers stopped", "pod", klog.KObj(pod), "podUID", pod.UID, "message", runnable.Message) var syncErr error p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) - if err := kl.killPod(ctx, pod, p, nil); err != nil { + if err := kl.killPod(pod, p, nil); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) syncErr = fmt.Errorf("error killing pod: %v", err) utilruntime.HandleError(syncErr) @@ -1672,7 +1668,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType podKilled := false if !pcm.Exists(pod) && !firstSync { p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) - if err := kl.killPod(ctx, pod, p, nil); err == nil { + if err := kl.killPod(pod, p, nil); err == nil { podKilled = true } else { klog.ErrorS(err, "KillPod failed", "pod", klog.KObj(pod), "podStatus", podStatus) @@ -1754,7 +1750,7 @@ func (kl *Kubelet) syncPod(ctx context.Context, updateType kubetypes.SyncPodType kl.probeManager.AddPod(pod) // Call the container runtime's SyncPod callback - result := kl.containerRuntime.SyncPod(ctx, pod, podStatus, pullSecrets, kl.backOff) + result := kl.containerRuntime.SyncPod(pod, podStatus, pullSecrets, kl.backOff) kl.reasonCache.Update(pod.UID, result) if err := result.Error(); err != nil { // Do not return error if the only failures were pods in backoff @@ -1789,7 +1785,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu } else { klog.V(4).InfoS("Pod terminating with grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "gracePeriod", nil) } - if err := kl.killPod(ctx, pod, *runningPod, gracePeriod); err != nil { + if err := kl.killPod(pod, *runningPod, gracePeriod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) // there was an error killing the pod, so we return that error directly utilruntime.HandleError(err) @@ -1814,7 +1810,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu kl.probeManager.StopLivenessAndStartup(pod) p := kubecontainer.ConvertPodStatusToRunningPod(kl.getRuntime().Type(), podStatus) - if err := kl.killPod(ctx, pod, p, gracePeriod); err != nil { + if err := kl.killPod(pod, p, gracePeriod); err != nil { kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedToKillPod, "error killing pod: %v", err) // there was an error killing the pod, so we return that error directly utilruntime.HandleError(err) @@ -1832,7 +1828,7 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu // catch race conditions introduced by callers updating pod status out of order. 
// TODO: have KillPod return the terminal status of stopped containers and write that into the // cache immediately - podStatus, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { klog.ErrorS(err, "Unable to read pod status prior to final pod termination", "pod", klog.KObj(pod), "podUID", pod.UID) return err @@ -2020,7 +2016,7 @@ func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult { // any new change seen, will run a sync against desired state and running state. If // no changes are seen to the configuration, will synchronize the last known desired // state every sync-frequency seconds. Never returns. -func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpdate, handler SyncHandler) { +func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHandler) { klog.InfoS("Starting kubelet main sync loop") // The syncTicker wakes up kubelet to checks if there are any pod workers // that need to be sync'd. A one-second period is sufficient because the @@ -2055,7 +2051,7 @@ func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpd duration = base kl.syncLoopMonitor.Store(kl.clock.Now()) - if !kl.syncLoopIteration(ctx, updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) { + if !kl.syncLoopIteration(updates, handler, syncTicker.C, housekeepingTicker.C, plegCh) { break } kl.syncLoopMonitor.Store(kl.clock.Now()) @@ -2094,7 +2090,7 @@ func (kl *Kubelet) syncLoop(ctx context.Context, updates <-chan kubetypes.PodUpd // - housekeepingCh: trigger cleanup of pods // - health manager: sync pods that have failed or in which one or more // containers have failed health checks -func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubetypes.PodUpdate, handler SyncHandler, +func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handler SyncHandler, syncCh <-chan time.Time, housekeepingCh <-chan time.Time, plegCh <-chan *pleg.PodLifecycleEvent) bool { select { case u, open := <-configCh: @@ -2190,7 +2186,7 @@ func (kl *Kubelet) syncLoopIteration(ctx context.Context, configCh <-chan kubety } else { start := time.Now() klog.V(4).InfoS("SyncLoop (housekeeping)") - if err := handler.HandlePodCleanups(ctx); err != nil { + if err := handler.HandlePodCleanups(); err != nil { klog.ErrorS(err, "Failed cleaning pods") } duration := time.Since(start) @@ -2364,9 +2360,8 @@ func (kl *Kubelet) LatestLoopEntryTime() time.Time { func (kl *Kubelet) updateRuntimeUp() { kl.updateRuntimeMux.Lock() defer kl.updateRuntimeMux.Unlock() - ctx := context.Background() - s, err := kl.containerRuntime.Status(ctx) + s, err := kl.containerRuntime.Status() if err != nil { klog.ErrorS(err, "Container runtime sanity check failed") return @@ -2450,7 +2445,6 @@ func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID str // Function is executed only during Kubelet start which improves latency to ready node by updating // pod CIDR, runtime status and node statuses ASAP. 
func (kl *Kubelet) fastStatusUpdateOnce() { - ctx := context.Background() for { time.Sleep(100 * time.Millisecond) node, err := kl.GetNode() @@ -2460,7 +2454,7 @@ func (kl *Kubelet) fastStatusUpdateOnce() { } if len(node.Spec.PodCIDRs) != 0 { podCIDRs := strings.Join(node.Spec.PodCIDRs, ",") - if _, err := kl.updatePodCIDR(ctx, podCIDRs); err != nil { + if _, err := kl.updatePodCIDR(podCIDRs); err != nil { klog.ErrorS(err, "Pod CIDR update failed", "CIDR", podCIDRs) continue } @@ -2477,13 +2471,12 @@ func (kl *Kubelet) fastStatusUpdateOnce() { // engine will be asked to checkpoint the given container into the kubelet's default // checkpoint directory. func (kl *Kubelet) CheckpointContainer( - ctx context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest, ) error { - container, err := kl.findContainer(ctx, podFullName, podUID, containerName) + container, err := kl.findContainer(podFullName, podUID, containerName) if err != nil { return err } @@ -2503,7 +2496,7 @@ func (kl *Kubelet) CheckpointContainer( options.ContainerId = string(container.ID.ID) - if err := kl.containerRuntime.CheckpointContainer(ctx, options); err != nil { + if err := kl.containerRuntime.CheckpointContainer(options); err != nil { return err } diff --git a/pkg/kubelet/kubelet_getters.go b/pkg/kubelet/kubelet_getters.go index eed312ceb15..6b0ce654802 100644 --- a/pkg/kubelet/kubelet_getters.go +++ b/pkg/kubelet/kubelet_getters.go @@ -191,8 +191,8 @@ func (kl *Kubelet) GetPods() []*v1.Pod { // container runtime cache. This function converts kubecontainer.Pod to // v1.Pod, so only the fields that exist in both kubecontainer.Pod and // v1.Pod are considered meaningful. -func (kl *Kubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) { - pods, err := kl.runtimeCache.GetPods(ctx) +func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) { + pods, err := kl.runtimeCache.GetPods() if err != nil { return nil, err } diff --git a/pkg/kubelet/kubelet_network.go b/pkg/kubelet/kubelet_network.go index 4943a53f324..bacbc27c7ff 100644 --- a/pkg/kubelet/kubelet_network.go +++ b/pkg/kubelet/kubelet_network.go @@ -17,10 +17,9 @@ limitations under the License. package kubelet import ( - "context" "fmt" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/klog/v2" ) @@ -41,7 +40,7 @@ func (kl *Kubelet) providerRequiresNetworkingConfiguration() bool { // updatePodCIDR updates the pod CIDR in the runtime state if it is different // from the current CIDR. Return true if pod CIDR is actually changed. -func (kl *Kubelet) updatePodCIDR(ctx context.Context, cidr string) (bool, error) { +func (kl *Kubelet) updatePodCIDR(cidr string) (bool, error) { kl.updatePodCIDRMux.Lock() defer kl.updatePodCIDRMux.Unlock() @@ -53,7 +52,7 @@ func (kl *Kubelet) updatePodCIDR(ctx context.Context, cidr string) (bool, error) // kubelet -> generic runtime -> runtime shim -> network plugin // docker/non-cri implementations have a passthrough UpdatePodCIDR - if err := kl.getRuntime().UpdatePodCIDR(ctx, cidr); err != nil { + if err := kl.getRuntime().UpdatePodCIDR(cidr); err != nil { // If updatePodCIDR would fail, theoretically pod CIDR could not change. // But it is better to be on the safe side to still return true here. 
return true, fmt.Errorf("failed to update pod CIDR: %v", err) diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go index cf0550a7b8d..b28e6288b6e 100644 --- a/pkg/kubelet/kubelet_node_status.go +++ b/pkg/kubelet/kubelet_node_status.go @@ -424,7 +424,7 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) { } } - kl.setNodeStatus(ctx, node) + kl.setNodeStatus(node) return node, nil } @@ -435,7 +435,6 @@ func (kl *Kubelet) initialNode(ctx context.Context) (*v1.Node, error) { func (kl *Kubelet) syncNodeStatus() { kl.syncNodeStatusMux.Lock() defer kl.syncNodeStatusMux.Unlock() - ctx := context.Background() if kl.kubeClient == nil || kl.heartbeatClient == nil { return @@ -444,17 +443,17 @@ func (kl *Kubelet) syncNodeStatus() { // This will exit immediately if it doesn't need to do anything. kl.registerWithAPIServer() } - if err := kl.updateNodeStatus(ctx); err != nil { + if err := kl.updateNodeStatus(); err != nil { klog.ErrorS(err, "Unable to update node status") } } // updateNodeStatus updates node status to master with retries if there is any // change or enough time passed from the last sync. -func (kl *Kubelet) updateNodeStatus(ctx context.Context) error { +func (kl *Kubelet) updateNodeStatus() error { klog.V(5).InfoS("Updating node status") for i := 0; i < nodeStatusUpdateRetry; i++ { - if err := kl.tryUpdateNodeStatus(ctx, i); err != nil { + if err := kl.tryUpdateNodeStatus(i); err != nil { if i > 0 && kl.onRepeatedHeartbeatFailure != nil { kl.onRepeatedHeartbeatFailure() } @@ -468,7 +467,7 @@ func (kl *Kubelet) updateNodeStatus(ctx context.Context) error { // tryUpdateNodeStatus tries to update node status to master if there is any // change or enough time passed from the last sync. -func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error { +func (kl *Kubelet) tryUpdateNodeStatus(tryNumber int) error { // In large clusters, GET and PUT operations on Node objects coming // from here are the majority of load on apiserver and etcd. // To reduce the load on etcd, we are serving GET operations from @@ -479,7 +478,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error if tryNumber == 0 { util.FromApiserverCache(&opts) } - node, err := kl.heartbeatClient.CoreV1().Nodes().Get(ctx, string(kl.nodeName), opts) + node, err := kl.heartbeatClient.CoreV1().Nodes().Get(context.TODO(), string(kl.nodeName), opts) if err != nil { return fmt.Errorf("error getting node %q: %v", kl.nodeName, err) } @@ -495,7 +494,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error // node.Spec.PodCIDR being non-empty. We also need to know if pod CIDR is // actually changed. podCIDRs := strings.Join(node.Spec.PodCIDRs, ",") - if podCIDRChanged, err = kl.updatePodCIDR(ctx, podCIDRs); err != nil { + if podCIDRChanged, err = kl.updatePodCIDR(podCIDRs); err != nil { klog.ErrorS(err, "Error updating pod CIDR") } } @@ -519,7 +518,7 @@ func (kl *Kubelet) tryUpdateNodeStatus(ctx context.Context, tryNumber int) error areRequiredLabelsNotPresent = true } - kl.setNodeStatus(ctx, node) + kl.setNodeStatus(node) now := kl.clock.Now() if now.Before(kl.lastStatusReportTime.Add(kl.nodeStatusReportFrequency)) { @@ -571,7 +570,7 @@ func (kl *Kubelet) recordEvent(eventType, event, message string) { } // record if node schedulable change. 
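tryUpdateNodeStatus above stays context-free and supplies context.TODO() only at the client-go call site, inside a bounded retry loop. A small sketch of that retry-at-the-boundary pattern; nodeClient and flakyClient are hypothetical stand-ins, not the heartbeat client.

package main

import (
	"context"
	"fmt"
)

const nodeStatusUpdateRetry = 5

// nodeClient stands in for the context-aware client interface.
type nodeClient interface {
	Get(ctx context.Context, name string) error
}

type flakyClient struct{ failures int }

func (c *flakyClient) Get(ctx context.Context, name string) error {
	if c.failures > 0 {
		c.failures--
		return fmt.Errorf("transient error getting node %q", name)
	}
	return nil
}

// updateNodeStatus retries tryUpdateNodeStatus a fixed number of times,
// mirroring the loop structure above.
func updateNodeStatus(c nodeClient, nodeName string) error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := tryUpdateNodeStatus(c, nodeName, i); err != nil {
			fmt.Println("attempt failed:", err)
			continue
		}
		return nil
	}
	return fmt.Errorf("update node status exceeds retry count")
}

func tryUpdateNodeStatus(c nodeClient, nodeName string, tryNumber int) error {
	// context.TODO() marks the spot where a real context could be threaded later.
	return c.Get(context.TODO(), nodeName)
}

func main() {
	fmt.Println(updateNodeStatus(&flakyClient{failures: 2}, "node-a"))
}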
-func (kl *Kubelet) recordNodeSchedulableEvent(ctx context.Context, node *v1.Node) error { +func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) error { kl.lastNodeUnschedulableLock.Lock() defer kl.lastNodeUnschedulableLock.Unlock() if kl.lastNodeUnschedulable != node.Spec.Unschedulable { @@ -589,10 +588,10 @@ func (kl *Kubelet) recordNodeSchedulableEvent(ctx context.Context, node *v1.Node // any fields that are currently set. // TODO(madhusudancs): Simplify the logic for setting node conditions and // refactor the node status condition code out to a different file. -func (kl *Kubelet) setNodeStatus(ctx context.Context, node *v1.Node) { +func (kl *Kubelet) setNodeStatus(node *v1.Node) { for i, f := range kl.setNodeStatusFuncs { klog.V(5).InfoS("Setting node status condition code", "position", i, "node", klog.KObj(node)) - if err := f(ctx, node); err != nil { + if err := f(node); err != nil { klog.ErrorS(err, "Failed to set some node status fields", "node", klog.KObj(node)) } } @@ -611,7 +610,7 @@ func (kl *Kubelet) getLastObservedNodeAddresses() []v1.NodeAddress { // defaultNodeStatusFuncs is a factory that generates the default set of // setNodeStatus funcs -func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) error { +func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error { // if cloud is not nil, we expect the cloud resource sync manager to exist var nodeAddressesFunc func() ([]v1.NodeAddress, error) if kl.cloud != nil { @@ -621,7 +620,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) er if kl.appArmorValidator != nil { validateHostFunc = kl.appArmorValidator.ValidateHost } - var setters []func(ctx context.Context, n *v1.Node) error + var setters []func(n *v1.Node) error setters = append(setters, nodestatus.NodeAddress(kl.nodeIPs, kl.nodeIPValidator, kl.hostname, kl.hostnameOverridden, kl.externalCloudProvider, kl.cloud, nodeAddressesFunc), nodestatus.MachineInfo(string(kl.nodeName), kl.maxPods, kl.podsPerCore, kl.GetCachedMachineInfo, kl.containerManager.GetCapacity, diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go index 7c0e6e605e5..6dacc2e3d5b 100644 --- a/pkg/kubelet/kubelet_node_status_test.go +++ b/pkg/kubelet/kubelet_node_status_test.go @@ -177,7 +177,6 @@ func TestUpdateNewNodeStatus(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() // generate one more in inputImageList than we configure the Kubelet to report, // or 5 images if unlimited numTestImages := int(tc.nodeStatusMaxImages) + 1 @@ -291,7 +290,7 @@ func TestUpdateNewNodeStatus(t *testing.T) { } kubelet.updateRuntimeUp() - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) require.True(t, actions[1].Matches("patch", "nodes")) @@ -316,7 +315,6 @@ func TestUpdateNewNodeStatus(t *testing.T) { } func TestUpdateExistingNodeStatus(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet @@ -480,7 +478,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) { } kubelet.updateRuntimeUp() - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() assert.Len(t, actions, 2) @@ -508,7 +506,6 @@ func TestUpdateExistingNodeStatus(t *testing.T) { } func 
TestUpdateExistingNodeStatusTimeout(t *testing.T) { - ctx := context.Background() if testing.Short() { t.Skip("skipping test in short mode.") } @@ -562,7 +559,7 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) { } // should return an error, but not hang - assert.Error(t, kubelet.updateNodeStatus(ctx)) + assert.Error(t, kubelet.updateNodeStatus()) // should have attempted multiple times if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry { @@ -575,7 +572,6 @@ func TestUpdateExistingNodeStatusTimeout(t *testing.T) { } func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet @@ -685,13 +681,13 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { checkNodeStatus := func(status v1.ConditionStatus, reason string) { kubeClient.ClearActions() - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) require.True(t, actions[1].Matches("patch", "nodes")) require.Equal(t, actions[1].GetSubresource(), "status") - updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{}) + updatedNode, err := kubeClient.CoreV1().Nodes().Get(context.TODO(), testKubeletHostname, metav1.GetOptions{}) require.NoError(t, err, "can't apply node status patch") for i, cond := range updatedNode.Status.Conditions { @@ -785,19 +781,17 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) { } func TestUpdateNodeStatusError(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet kubelet.kubeClient = nil // ensure only the heartbeat client is used // No matching node for the kubelet testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain - assert.Error(t, kubelet.updateNodeStatus(ctx)) + assert.Error(t, kubelet.updateNodeStatus()) assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry) } func TestUpdateNodeStatusWithLease(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() clock := testKubelet.fakeClock @@ -917,7 +911,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) { // Update node status when node status is created. // Report node status. kubelet.updateRuntimeUp() - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() assert.Len(t, actions, 2) @@ -940,7 +934,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) { // Update node status again when nothing is changed (except heartbeat time). // Report node status if it has exceeded the duration of nodeStatusReportFrequency. clock.Step(time.Minute) - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) // 2 more action (There were 2 actions before). actions = kubeClient.Actions() @@ -965,7 +959,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) { // Update node status again when nothing is changed (except heartbeat time). // Do not report node status if it is within the duration of nodeStatusReportFrequency. 
clock.Step(10 * time.Second) - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) // Only 1 more action (There were 4 actions before). actions = kubeClient.Actions() @@ -983,7 +977,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) { newMachineInfo := oldMachineInfo.Clone() newMachineInfo.MemoryCapacity = uint64(newMemoryCapacity) kubelet.setCachedMachineInfo(newMachineInfo) - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) // 2 more action (There were 5 actions before). actions = kubeClient.Actions() @@ -1015,7 +1009,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) { updatedNode.Spec.PodCIDR = podCIDRs[0] updatedNode.Spec.PodCIDRs = podCIDRs kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now") // 2 more action (There were 7 actions before). actions = kubeClient.Actions() @@ -1028,7 +1022,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) { clock.Step(10 * time.Second) assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated") - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) // Only 1 more action (There were 9 actions before). actions = kubeClient.Actions() assert.Len(t, actions, 10) @@ -1084,7 +1078,6 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() // Setup testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() @@ -1101,7 +1094,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) { kubelet.volumeManager = fakeVolumeManager // Only test VolumesInUse setter - kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{ + kubelet.setNodeStatusFuncs = []func(*v1.Node) error{ nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced, kubelet.volumeManager.GetVolumesInUse), } @@ -1110,7 +1103,7 @@ func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) { kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain // Execute - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) // Validate actions := kubeClient.Actions() @@ -1352,7 +1345,6 @@ func TestTryRegisterWithApiServer(t *testing.T) { } func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) { - ctx := context.Background() const nodeStatusMaxImages = 5 // generate one more in inputImageList than we configure the Kubelet to report @@ -1411,7 +1403,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) { } kubelet.updateRuntimeUp() - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() require.Len(t, actions, 2) require.True(t, actions[1].Matches("patch", "nodes")) @@ -2825,7 +2817,6 @@ func TestUpdateNodeAddresses(t *testing.T) { for _, test := range tests { t.Run(test.Name, func(t *testing.T) { - ctx := context.Background() oldNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}, Spec: v1.NodeSpec{}, @@ -2841,15 +2832,15 @@ func 
TestUpdateNodeAddresses(t *testing.T) { }, } - _, err := kubeClient.CoreV1().Nodes().Update(ctx, oldNode, metav1.UpdateOptions{}) + _, err := kubeClient.CoreV1().Nodes().Update(context.TODO(), oldNode, metav1.UpdateOptions{}) assert.NoError(t, err) - kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{ - func(_ context.Context, node *v1.Node) error { + kubelet.setNodeStatusFuncs = []func(*v1.Node) error{ + func(node *v1.Node) error { node.Status.Addresses = expectedNode.Status.Addresses return nil }, } - assert.NoError(t, kubelet.updateNodeStatus(ctx)) + assert.NoError(t, kubelet.updateNodeStatus()) actions := kubeClient.Actions() lastAction := actions[len(actions)-1] diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index ab9f0ee116e..cc0b3c24a36 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -466,7 +466,7 @@ func (kl *Kubelet) GetPodCgroupParent(pod *v1.Pod) string { // GenerateRunContainerOptions generates the RunContainerOptions, which can be used by // the container runtime to set parameters for launching a container. -func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { +func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string, podIPs []string) (*kubecontainer.RunContainerOptions, func(), error) { opts, err := kl.containerManager.GetResources(pod, container) if err != nil { return nil, nil, err @@ -519,7 +519,7 @@ func (kl *Kubelet) GenerateRunContainerOptions(ctx context.Context, pod *v1.Pod, // only do this check if the experimental behavior is enabled, otherwise allow it to default to false if kl.experimentalHostUserNamespaceDefaulting { - opts.EnableHostUserNamespace = kl.enableHostUserNamespace(ctx, pod) + opts.EnableHostUserNamespace = kl.enableHostUserNamespace(pod) } return opts, cleanupAction, nil @@ -854,9 +854,9 @@ func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, co // killPod instructs the container runtime to kill the pod. This method requires that // the pod status contains the result of the last syncPod, otherwise it may fail to // terminate newly created containers and sandboxes. -func (kl *Kubelet) killPod(ctx context.Context, pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error { +func (kl *Kubelet) killPod(pod *v1.Pod, p kubecontainer.Pod, gracePeriodOverride *int64) error { // Call the container runtime KillPod method which stops all known running containers of the pod - if err := kl.containerRuntime.KillPod(ctx, pod, p, gracePeriodOverride); err != nil { + if err := kl.containerRuntime.KillPod(pod, p, gracePeriodOverride); err != nil { return err } if err := kl.containerManager.UpdateQOSCgroups(); err != nil { @@ -1054,7 +1054,7 @@ func (kl *Kubelet) deleteOrphanedMirrorPods() { // is executing which means no new pods can appear. // NOTE: This function is executed by the main sync loop, so it // should not contain any blocking calls. -func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { +func (kl *Kubelet) HandlePodCleanups() error { // The kubelet lacks checkpointing, so we need to introspect the set of pods // in the cgroup tree prior to inspecting the set of pods in our pod manager. 
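Both defaultNodeStatusFuncs and the test above now build node status from a plain []func(*v1.Node) error. A minimal sketch of that setter-slice composition, using a hypothetical Node struct in place of v1.Node.

package main

import "fmt"

// Node is a stand-in for v1.Node with just the fields the sketch needs.
type Node struct {
	Name      string
	Addresses []string
	Ready     bool
}

type setter func(*Node) error

func addressSetter(addrs ...string) setter {
	return func(n *Node) error {
		n.Addresses = append(n.Addresses, addrs...)
		return nil
	}
}

func readySetter(ready bool) setter {
	return func(n *Node) error {
		n.Ready = ready
		return nil
	}
}

// setNodeStatus applies every setter and logs (rather than aborts on) failures,
// mirroring the loop in setNodeStatus above.
func setNodeStatus(n *Node, setters []setter) {
	for i, f := range setters {
		if err := f(n); err != nil {
			fmt.Printf("setter %d failed for node %s: %v\n", i, n.Name, err)
		}
	}
}

func main() {
	n := &Node{Name: "node-a"}
	setNodeStatus(n, []setter{addressSetter("10.0.0.1"), readySetter(true)})
	fmt.Printf("%+v\n", *n)
}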
// this ensures our view of the cgroup tree does not mistakenly observe pods @@ -1118,7 +1118,7 @@ func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { // Terminate any pods that are observed in the runtime but not // present in the list of known running pods from config. - runningRuntimePods, err := kl.runtimeCache.GetPods(ctx) + runningRuntimePods, err := kl.runtimeCache.GetPods() if err != nil { klog.ErrorS(err, "Error listing containers") return err @@ -1156,7 +1156,7 @@ func (kl *Kubelet) HandlePodCleanups(ctx context.Context) error { // in the cache. We need to bypass the cache to get the latest set of // running pods to clean up the volumes. // TODO: Evaluate the performance impact of bypassing the runtime cache. - runningRuntimePods, err = kl.containerRuntime.GetPods(ctx, false) + runningRuntimePods, err = kl.containerRuntime.GetPods(false) if err != nil { klog.ErrorS(err, "Error listing containers") return err @@ -1876,8 +1876,8 @@ func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { // findContainer finds and returns the container with the given pod ID, full name, and container name. // It returns nil if not found. -func (kl *Kubelet) findContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) { - pods, err := kl.containerRuntime.GetPods(ctx, false) +func (kl *Kubelet) findContainer(podFullName string, podUID types.UID, containerName string) (*kubecontainer.Container, error) { + pods, err := kl.containerRuntime.GetPods(false) if err != nil { return nil, err } @@ -1889,8 +1889,8 @@ func (kl *Kubelet) findContainer(ctx context.Context, podFullName string, podUID } // RunInContainer runs a command in a container, returns the combined stdout, stderr as an array of bytes -func (kl *Kubelet) RunInContainer(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) { - container, err := kl.findContainer(ctx, podFullName, podUID, containerName) +func (kl *Kubelet) RunInContainer(podFullName string, podUID types.UID, containerName string, cmd []string) ([]byte, error) { + container, err := kl.findContainer(podFullName, podUID, containerName) if err != nil { return nil, err } @@ -1898,24 +1898,24 @@ func (kl *Kubelet) RunInContainer(ctx context.Context, podFullName string, podUI return nil, fmt.Errorf("container not found (%q)", containerName) } // TODO(tallclair): Pass a proper timeout value. - return kl.runner.RunInContainer(ctx, container.ID, cmd, 0) + return kl.runner.RunInContainer(container.ID, cmd, 0) } // GetExec gets the URL the exec will be served from, or nil if the Kubelet will serve it. 
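HandlePodCleanups above lists pods from the runtime cache and terminates anything not present in the desired set from config. A simplified sketch of that reconciliation step, with hypothetical runtimePod and podCache types standing in for the kubelet's runtime cache.

package main

import "fmt"

type runtimePod struct {
	UID  string
	Name string
}

// podCache stands in for the runtime cache's GetPods, which no longer takes a context.
type podCache interface {
	GetPods() ([]runtimePod, error)
}

type fakeCache struct{ pods []runtimePod }

func (f fakeCache) GetPods() ([]runtimePod, error) { return f.pods, nil }

// handlePodCleanups returns the UIDs of running pods that are not desired,
// mirroring the "observed in the runtime but not present in config" step.
func handlePodCleanups(cache podCache, desired map[string]bool) ([]string, error) {
	running, err := cache.GetPods()
	if err != nil {
		return nil, fmt.Errorf("error listing containers: %w", err)
	}
	var orphaned []string
	for _, p := range running {
		if !desired[p.UID] {
			orphaned = append(orphaned, p.UID)
		}
	}
	return orphaned, nil
}

func main() {
	cache := fakeCache{pods: []runtimePod{{UID: "12345678", Name: "orphan"}, {UID: "87654321", Name: "kept"}}}
	orphaned, _ := handlePodCleanups(cache, map[string]bool{"87654321": true})
	fmt.Println("pods to terminate:", orphaned)
}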
-func (kl *Kubelet) GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { - container, err := kl.findContainer(ctx, podFullName, podUID, containerName) +func (kl *Kubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { + container, err := kl.findContainer(podFullName, podUID, containerName) if err != nil { return nil, err } if container == nil { return nil, fmt.Errorf("container not found (%q)", containerName) } - return kl.streamingRuntime.GetExec(ctx, container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY) + return kl.streamingRuntime.GetExec(container.ID, cmd, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, streamOpts.TTY) } // GetAttach gets the URL the attach will be served from, or nil if the Kubelet will serve it. -func (kl *Kubelet) GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { - container, err := kl.findContainer(ctx, podFullName, podUID, containerName) +func (kl *Kubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { + container, err := kl.findContainer(podFullName, podUID, containerName) if err != nil { return nil, err } @@ -1936,12 +1936,12 @@ func (kl *Kubelet) GetAttach(ctx context.Context, podFullName string, podUID typ } tty := containerSpec.TTY - return kl.streamingRuntime.GetAttach(ctx, container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty) + return kl.streamingRuntime.GetAttach(container.ID, streamOpts.Stdin, streamOpts.Stdout, streamOpts.Stderr, tty) } // GetPortForward gets the URL the port-forward will be served from, or nil if the Kubelet will serve it. -func (kl *Kubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { - pods, err := kl.containerRuntime.GetPods(ctx, false) +func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { + pods, err := kl.containerRuntime.GetPods(false) if err != nil { return nil, err } @@ -1954,7 +1954,7 @@ func (kl *Kubelet) GetPortForward(ctx context.Context, podName, podNamespace str return nil, fmt.Errorf("pod not found (%q)", podFullName) } - return kl.streamingRuntime.GetPortForward(ctx, podName, podNamespace, podUID, portForwardOpts.Ports) + return kl.streamingRuntime.GetPortForward(podName, podNamespace, podUID, portForwardOpts.Ports) } // cleanupOrphanedPodCgroups removes cgroups that should no longer exist. @@ -1995,9 +1995,9 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(pcm cm.PodContainerManager, cgroupP // NOTE: when if a container shares any namespace with another container it must also share the user namespace // or it will not have the correct capabilities in the namespace. This means that host user namespace // is enabled per pod, not per container. 
-func (kl *Kubelet) enableHostUserNamespace(ctx context.Context, pod *v1.Pod) bool { +func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool { if kubecontainer.HasPrivilegedContainer(pod) || hasHostNamespace(pod) || - hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(ctx, pod) { + hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) { return true } return false @@ -2037,7 +2037,7 @@ func hasHostNamespace(pod *v1.Pod) bool { } // hasHostMountPVC returns true if a PVC is referencing a HostPath volume. -func (kl *Kubelet) hasHostMountPVC(ctx context.Context, pod *v1.Pod) bool { +func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool { for _, volume := range pod.Spec.Volumes { pvcName := "" switch { @@ -2048,13 +2048,13 @@ func (kl *Kubelet) hasHostMountPVC(ctx context.Context, pod *v1.Pod) bool { default: continue } - pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{}) + pvc, err := kl.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) if err != nil { klog.InfoS("Unable to retrieve pvc", "pvc", klog.KRef(pod.Namespace, pvcName), "err", err) continue } if pvc != nil { - referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) + referencedVolume, err := kl.kubeClient.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) if err != nil { klog.InfoS("Unable to retrieve pv", "pvName", pvc.Spec.VolumeName, "err", err) continue diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go index 8fc06f6cc03..cf3c01273fa 100644 --- a/pkg/kubelet/kubelet_pods_test.go +++ b/pkg/kubelet/kubelet_pods_test.go @@ -17,7 +17,6 @@ limitations under the License. 
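hasHostMountPVC above resolves each claimed volume through the API client, passing context.TODO() now that the method itself takes no context, and treats lookup failures as log-and-continue. A sketch of that flow with a hypothetical volumeClient in place of the Kubernetes clientset.

package main

import (
	"context"
	"fmt"
)

type pvc struct{ VolumeName string }

type pv struct{ HostPath bool }

// volumeClient stands in for the small subset of the API client used here.
type volumeClient interface {
	GetPVC(ctx context.Context, namespace, name string) (*pvc, error)
	GetPV(ctx context.Context, name string) (*pv, error)
}

type fakeClient struct{}

func (fakeClient) GetPVC(_ context.Context, namespace, name string) (*pvc, error) {
	return &pvc{VolumeName: "pv-" + name}, nil
}

func (fakeClient) GetPV(_ context.Context, name string) (*pv, error) {
	return &pv{HostPath: true}, nil
}

// hasHostMountPVC reports whether any claimed volume resolves to a HostPath PV.
func hasHostMountPVC(c volumeClient, namespace string, claimNames []string) bool {
	for _, claim := range claimNames {
		claimed, err := c.GetPVC(context.TODO(), namespace, claim)
		if err != nil {
			continue // log-and-continue, as in the original
		}
		vol, err := c.GetPV(context.TODO(), claimed.VolumeName)
		if err != nil {
			continue
		}
		if vol.HostPath {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasHostMountPVC(fakeClient{}, "ns", []string{"data"}))
}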
package kubelet import ( - "context" "errors" "fmt" "net" @@ -299,7 +298,6 @@ fd00::6 podFoo.domainFoo podFoo } func TestRunInContainerNoSuchPod(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet @@ -310,7 +308,6 @@ func TestRunInContainerNoSuchPod(t *testing.T) { podNamespace := "nsFoo" containerName := "containerFoo" output, err := kubelet.RunInContainer( - ctx, kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}), "", containerName, @@ -320,7 +317,6 @@ func TestRunInContainerNoSuchPod(t *testing.T) { } func TestRunInContainer(t *testing.T) { - ctx := context.Background() for _, testError := range []error{nil, errors.New("bar")} { testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() @@ -346,7 +342,7 @@ func TestRunInContainer(t *testing.T) { }}, } cmd := []string{"ls"} - actualOutput, err := kubelet.RunInContainer(ctx, "podFoo_nsFoo", "", "containerFoo", cmd) + actualOutput, err := kubelet.RunInContainer("podFoo_nsFoo", "", "containerFoo", cmd) assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError) assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError) // this isn't 100% foolproof as a bug in a real CommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test @@ -2966,7 +2962,6 @@ func TestGetExec(t *testing.T) { for _, tc := range testcases { t.Run(tc.description, func(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet @@ -2988,7 +2983,7 @@ func TestGetExec(t *testing.T) { kubelet.containerRuntime = fakeRuntime kubelet.streamingRuntime = fakeRuntime - redirect, err := kubelet.GetExec(ctx, tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{}) + redirect, err := kubelet.GetExec(tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{}) if tc.expectError { assert.Error(t, err, description) } else { @@ -3021,7 +3016,6 @@ func TestGetPortForward(t *testing.T) { }} for _, tc := range testcases { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet @@ -3043,7 +3037,7 @@ func TestGetPortForward(t *testing.T) { kubelet.containerRuntime = fakeRuntime kubelet.streamingRuntime = fakeRuntime - redirect, err := kubelet.GetPortForward(ctx, tc.podName, podNamespace, podUID, portforward.V4Options{}) + redirect, err := kubelet.GetPortForward(tc.podName, podNamespace, podUID, portforward.V4Options{}) if tc.expectError { assert.Error(t, err, description) } else { @@ -3092,7 +3086,6 @@ func TestHasHostMountPVC(t *testing.T) { } run := func(t *testing.T, v testcase) { - ctx := context.Background() testKubelet := newTestKubelet(t, false) defer testKubelet.Cleanup() pod := &v1.Pod{ @@ -3141,7 +3134,7 @@ func TestHasHostMountPVC(t *testing.T) { return true, volumeToReturn, v.pvError }) - actual := testKubelet.kubelet.hasHostMountPVC(ctx, pod) + actual := testKubelet.kubelet.hasHostMountPVC(pod) if actual != v.expected { t.Errorf("expected %t but got %t", v.expected, actual) } diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index 516cbf0471c..89f8212c1a1 100644 --- a/pkg/kubelet/kubelet_test.go 
+++ b/pkg/kubelet/kubelet_test.go @@ -109,7 +109,7 @@ type fakeImageGCManager struct { } func (f *fakeImageGCManager) GetImageList() ([]kubecontainer.Image, error) { - return f.fakeImageService.ListImages(context.Background()) + return f.fakeImageService.ListImages() } type TestKubelet struct { @@ -408,7 +408,6 @@ func newTestPods(count int) []*v1.Pod { } func TestSyncLoopAbort(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kubelet := testKubelet.kubelet @@ -421,11 +420,11 @@ func TestSyncLoopAbort(t *testing.T) { close(ch) // sanity check (also prevent this test from hanging in the next step) - ok := kubelet.syncLoopIteration(ctx, ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1)) + ok := kubelet.syncLoopIteration(ch, kubelet, make(chan time.Time), make(chan time.Time), make(chan *pleg.PodLifecycleEvent, 1)) require.False(t, ok, "Expected syncLoopIteration to return !ok since update chan was closed") // this should terminate immediately; if it hangs then the syncLoopIteration isn't aborting properly - kubelet.syncLoop(ctx, ch, kubelet) + kubelet.syncLoop(ch, kubelet) } func TestSyncPodsStartPod(t *testing.T) { @@ -446,7 +445,6 @@ func TestSyncPodsStartPod(t *testing.T) { } func TestHandlePodCleanupsPerQOS(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() @@ -474,7 +472,7 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) { // within a goroutine so a two second delay should be enough time to // mark the pod as killed (within this test case). - kubelet.HandlePodCleanups(ctx) + kubelet.HandlePodCleanups() // assert that unwanted pods were killed if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) { @@ -485,9 +483,9 @@ func TestHandlePodCleanupsPerQOS(t *testing.T) { // simulate Runtime.KillPod fakeRuntime.PodList = nil - kubelet.HandlePodCleanups(ctx) - kubelet.HandlePodCleanups(ctx) - kubelet.HandlePodCleanups(ctx) + kubelet.HandlePodCleanups() + kubelet.HandlePodCleanups() + kubelet.HandlePodCleanups() destroyCount := 0 err := wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) { @@ -644,7 +642,6 @@ func TestDispatchWorkOfActivePod(t *testing.T) { } func TestHandlePodCleanups(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() @@ -663,7 +660,7 @@ func TestHandlePodCleanups(t *testing.T) { } kubelet := testKubelet.kubelet - kubelet.HandlePodCleanups(ctx) + kubelet.HandlePodCleanups() // assert that unwanted pods were queued to kill if actual, expected := kubelet.podWorkers.(*fakePodWorkers).triggeredDeletion, []types.UID{"12345678"}; !reflect.DeepEqual(actual, expected) { @@ -1134,7 +1131,6 @@ func TestHandlePluginResources(t *testing.T) { // TODO(filipg): This test should be removed once StatusSyncer can do garbage collection without external signal. func TestPurgingObsoleteStatusMapEntries(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() @@ -1151,7 +1147,7 @@ func TestPurgingObsoleteStatusMapEntries(t *testing.T) { } // Sync with empty pods so that the entry in status map will be removed. 
kl.podManager.SetPods([]*v1.Pod{}) - kl.HandlePodCleanups(ctx) + kl.HandlePodCleanups() if _, found := kl.statusManager.GetPodStatus(podToTest.UID); found { t.Fatalf("expected to not have status cached for pod2") } @@ -1381,7 +1377,6 @@ func TestDeleteOutdatedMirrorPod(t *testing.T) { } func TestDeleteOrphanedMirrorPods(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() @@ -1431,7 +1426,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { } // Sync with an empty pod list to delete all mirror pods. - kl.HandlePodCleanups(ctx) + kl.HandlePodCleanups() assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods") for i, pod := range orphanPods { name := kubecontainer.GetPodFullName(pod) @@ -1450,7 +1445,6 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) { } func TestGetContainerInfoForMirrorPods(t *testing.T) { - ctx := context.Background() // pods contain one static and one mirror pod with the same name but // different UIDs. pods := []*v1.Pod{ @@ -1509,7 +1503,7 @@ func TestGetContainerInfoForMirrorPods(t *testing.T) { kubelet.podManager.SetPods(pods) // Use the mirror pod UID to retrieve the stats. - stats, err := kubelet.GetContainerInfo(ctx, "qux_ns", "5678", "foo", cadvisorReq) + stats, err := kubelet.GetContainerInfo("qux_ns", "5678", "foo", cadvisorReq) assert.NoError(t, err) require.NotNil(t, stats) } @@ -1670,13 +1664,11 @@ func TestCheckpointContainer(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx := context.Background() options := &runtimeapi.CheckpointContainerRequest{} if test.checkpointLocation != "" { options.Location = test.checkpointLocation } status := kubelet.CheckpointContainer( - ctx, fakePod.Pod.ID, fmt.Sprintf( "%s_%s", @@ -1824,7 +1816,6 @@ func podWithUIDNameNsSpec(uid types.UID, name, namespace string, spec v1.PodSpec } func TestDeletePodDirsForDeletedPods(t *testing.T) { - ctx := context.Background() testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */) defer testKubelet.Cleanup() kl := testKubelet.kubelet @@ -1842,19 +1833,18 @@ func TestDeletePodDirsForDeletedPods(t *testing.T) { // Pod 1 has been deleted and no longer exists. kl.podManager.SetPods([]*v1.Pod{pods[0]}) - kl.HandlePodCleanups(ctx) + kl.HandlePodCleanups() assert.True(t, dirExists(kl.getPodDir(pods[0].UID)), "Expected directory to exist for pod 0") assert.False(t, dirExists(kl.getPodDir(pods[1].UID)), "Expected directory to be deleted for pod 1") } func syncAndVerifyPodDir(t *testing.T, testKubelet *TestKubelet, pods []*v1.Pod, podsToCheck []*v1.Pod, shouldExist bool) { - ctx := context.Background() t.Helper() kl := testKubelet.kubelet kl.podManager.SetPods(pods) kl.HandlePodSyncs(pods) - kl.HandlePodCleanups(ctx) + kl.HandlePodCleanups() for i, pod := range podsToCheck { exist := dirExists(kl.getPodDir(pod.UID)) assert.Equal(t, shouldExist, exist, "directory of pod %d", i) diff --git a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go index 8786eea35d3..ca519e04935 100644 --- a/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/fake_kuberuntime_manager.go @@ -17,7 +17,6 @@ limitations under the License. 
package kuberuntime import ( - "context" "net/http" "time" @@ -84,7 +83,6 @@ func (f *fakePodStateProvider) ShouldPodContentBeRemoved(uid types.UID) bool { } func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageService internalapi.ImageManagerService, machineInfo *cadvisorapi.MachineInfo, osInterface kubecontainer.OSInterface, runtimeHelper kubecontainer.RuntimeHelper, keyring credentialprovider.DockerKeyring) (*kubeGenericRuntimeManager, error) { - ctx := context.Background() recorder := &record.FakeRecorder{} logManager, err := logs.NewContainerLogManager(runtimeService, osInterface, "1", 2) if err != nil { @@ -109,7 +107,7 @@ func newFakeKubeRuntimeManager(runtimeService internalapi.RuntimeService, imageS memoryThrottlingFactor: 0.8, } - typedVersion, err := runtimeService.Version(ctx, kubeRuntimeAPIVersion) + typedVersion, err := runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { return nil, err } diff --git a/pkg/kubelet/kuberuntime/helpers.go b/pkg/kubelet/kuberuntime/helpers.go index c5db7c9a13f..9343b78335a 100644 --- a/pkg/kubelet/kuberuntime/helpers.go +++ b/pkg/kubelet/kuberuntime/helpers.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "fmt" "path/filepath" "strconv" @@ -120,8 +119,8 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeapi.PodSand // getImageUser gets uid or user name that will run the command(s) from image. The function // guarantees that only one of them is set. -func (m *kubeGenericRuntimeManager) getImageUser(ctx context.Context, image string) (*int64, string, error) { - resp, err := m.imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: image}, false) +func (m *kubeGenericRuntimeManager) getImageUser(image string) (*int64, string, error) { + resp, err := m.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}, false) if err != nil { return nil, "", err } diff --git a/pkg/kubelet/kuberuntime/helpers_test.go b/pkg/kubelet/kuberuntime/helpers_test.go index 47f429f7115..83901796a0a 100644 --- a/pkg/kubelet/kuberuntime/helpers_test.go +++ b/pkg/kubelet/kuberuntime/helpers_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package kuberuntime import ( - "context" "testing" "github.com/stretchr/testify/assert" @@ -32,7 +31,7 @@ import ( type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) -func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { +func (f podStatusProviderFunc) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { return f(uid, name, namespace) } @@ -218,11 +217,10 @@ func TestGetImageUser(t *testing.T) { i.SetFakeImages([]string{"test-image-ref1", "test-image-ref2", "test-image-ref3"}) for j, test := range tests { - ctx := context.Background() i.Images[test.originalImage.name].Username = test.originalImage.username i.Images[test.originalImage.name].Uid = test.originalImage.uid - uid, username, err := m.getImageUser(ctx, test.originalImage.name) + uid, username, err := m.getImageUser(test.originalImage.name) assert.NoError(t, err, "TestCase[%d]", j) if test.expectedImageUserValues.uid == (*int64)(nil) { diff --git a/pkg/kubelet/kuberuntime/instrumented_services.go b/pkg/kubelet/kuberuntime/instrumented_services.go index 54d046691cc..18ce6aa0425 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services.go +++ b/pkg/kubelet/kuberuntime/instrumented_services.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "time" internalapi "k8s.io/cri-api/pkg/apis" @@ -60,130 +59,130 @@ func recordError(operation string, err error) { } } -func (in instrumentedRuntimeService) Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) { +func (in instrumentedRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { const operation = "version" defer recordOperation(operation, time.Now()) - out, err := in.service.Version(ctx, apiVersion) + out, err := in.service.Version(apiVersion) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) { +func (in instrumentedRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) { const operation = "status" defer recordOperation(operation, time.Now()) - out, err := in.service.Status(ctx, verbose) + out, err := in.service.Status(verbose) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) CreateContainer(ctx context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (in instrumentedRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { const operation = "create_container" defer recordOperation(operation, time.Now()) - out, err := in.service.CreateContainer(ctx, podSandboxID, config, sandboxConfig) + out, err := in.service.CreateContainer(podSandboxID, config, sandboxConfig) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) StartContainer(ctx context.Context, containerID string) error { +func (in instrumentedRuntimeService) StartContainer(containerID string) error { const operation = "start_container" defer recordOperation(operation, time.Now()) - err := in.service.StartContainer(ctx, containerID) + err := in.service.StartContainer(containerID) recordError(operation, err) return err } -func (in instrumentedRuntimeService) StopContainer(ctx context.Context, 
containerID string, timeout int64) error { +func (in instrumentedRuntimeService) StopContainer(containerID string, timeout int64) error { const operation = "stop_container" defer recordOperation(operation, time.Now()) - err := in.service.StopContainer(ctx, containerID, timeout) + err := in.service.StopContainer(containerID, timeout) recordError(operation, err) return err } -func (in instrumentedRuntimeService) RemoveContainer(ctx context.Context, containerID string) error { +func (in instrumentedRuntimeService) RemoveContainer(containerID string) error { const operation = "remove_container" defer recordOperation(operation, time.Now()) - err := in.service.RemoveContainer(ctx, containerID) + err := in.service.RemoveContainer(containerID) recordError(operation, err) return err } -func (in instrumentedRuntimeService) ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (in instrumentedRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { const operation = "list_containers" defer recordOperation(operation, time.Now()) - out, err := in.service.ListContainers(ctx, filter) + out, err := in.service.ListContainers(filter) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { +func (in instrumentedRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { const operation = "container_status" defer recordOperation(operation, time.Now()) - out, err := in.service.ContainerStatus(ctx, containerID, verbose) + out, err := in.service.ContainerStatus(containerID, verbose) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) error { +func (in instrumentedRuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) error { const operation = "update_container" defer recordOperation(operation, time.Now()) - err := in.service.UpdateContainerResources(ctx, containerID, resources) + err := in.service.UpdateContainerResources(containerID, resources) recordError(operation, err) return err } -func (in instrumentedRuntimeService) ReopenContainerLog(ctx context.Context, containerID string) error { +func (in instrumentedRuntimeService) ReopenContainerLog(containerID string) error { const operation = "reopen_container_log" defer recordOperation(operation, time.Now()) - err := in.service.ReopenContainerLog(ctx, containerID) + err := in.service.ReopenContainerLog(containerID) recordError(operation, err) return err } -func (in instrumentedRuntimeService) ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) { +func (in instrumentedRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) ([]byte, []byte, error) { const operation = "exec_sync" defer recordOperation(operation, time.Now()) - stdout, stderr, err := in.service.ExecSync(ctx, containerID, cmd, timeout) + stdout, stderr, err := in.service.ExecSync(containerID, cmd, timeout) recordError(operation, err) return stdout, stderr, err } -func (in instrumentedRuntimeService) Exec(ctx context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (in instrumentedRuntimeService) Exec(req 
*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { const operation = "exec" defer recordOperation(operation, time.Now()) - resp, err := in.service.Exec(ctx, req) + resp, err := in.service.Exec(req) recordError(operation, err) return resp, err } -func (in instrumentedRuntimeService) Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (in instrumentedRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { const operation = "attach" defer recordOperation(operation, time.Now()) - resp, err := in.service.Attach(ctx, req) + resp, err := in.service.Attach(req) recordError(operation, err) return resp, err } -func (in instrumentedRuntimeService) RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (in instrumentedRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { const operation = "run_podsandbox" startTime := time.Now() defer recordOperation(operation, startTime) defer metrics.RunPodSandboxDuration.WithLabelValues(runtimeHandler).Observe(metrics.SinceInSeconds(startTime)) - out, err := in.service.RunPodSandbox(ctx, config, runtimeHandler) + out, err := in.service.RunPodSandbox(config, runtimeHandler) recordError(operation, err) if err != nil { metrics.RunPodSandboxErrors.WithLabelValues(runtimeHandler).Inc() @@ -191,146 +190,146 @@ func (in instrumentedRuntimeService) RunPodSandbox(ctx context.Context, config * return out, err } -func (in instrumentedRuntimeService) StopPodSandbox(ctx context.Context, podSandboxID string) error { +func (in instrumentedRuntimeService) StopPodSandbox(podSandboxID string) error { const operation = "stop_podsandbox" defer recordOperation(operation, time.Now()) - err := in.service.StopPodSandbox(ctx, podSandboxID) + err := in.service.StopPodSandbox(podSandboxID) recordError(operation, err) return err } -func (in instrumentedRuntimeService) RemovePodSandbox(ctx context.Context, podSandboxID string) error { +func (in instrumentedRuntimeService) RemovePodSandbox(podSandboxID string) error { const operation = "remove_podsandbox" defer recordOperation(operation, time.Now()) - err := in.service.RemovePodSandbox(ctx, podSandboxID) + err := in.service.RemovePodSandbox(podSandboxID) recordError(operation, err) return err } -func (in instrumentedRuntimeService) PodSandboxStatus(ctx context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { +func (in instrumentedRuntimeService) PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { const operation = "podsandbox_status" defer recordOperation(operation, time.Now()) - out, err := in.service.PodSandboxStatus(ctx, podSandboxID, verbose) + out, err := in.service.PodSandboxStatus(podSandboxID, verbose) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (in instrumentedRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { const operation = "list_podsandbox" defer recordOperation(operation, time.Now()) - out, err := in.service.ListPodSandbox(ctx, filter) + out, err := in.service.ListPodSandbox(filter) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) ContainerStats(ctx context.Context, containerID string) 
(*runtimeapi.ContainerStats, error) { +func (in instrumentedRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { const operation = "container_stats" defer recordOperation(operation, time.Now()) - out, err := in.service.ContainerStats(ctx, containerID) + out, err := in.service.ContainerStats(containerID) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (in instrumentedRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { const operation = "list_container_stats" defer recordOperation(operation, time.Now()) - out, err := in.service.ListContainerStats(ctx, filter) + out, err := in.service.ListContainerStats(filter) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) { +func (in instrumentedRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { const operation = "podsandbox_stats" defer recordOperation(operation, time.Now()) - out, err := in.service.PodSandboxStats(ctx, podSandboxID) + out, err := in.service.PodSandboxStats(podSandboxID) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { +func (in instrumentedRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { const operation = "list_podsandbox_stats" defer recordOperation(operation, time.Now()) - out, err := in.service.ListPodSandboxStats(ctx, filter) + out, err := in.service.ListPodSandboxStats(filter) recordError(operation, err) return out, err } -func (in instrumentedRuntimeService) PortForward(ctx context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (in instrumentedRuntimeService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { const operation = "port_forward" defer recordOperation(operation, time.Now()) - resp, err := in.service.PortForward(ctx, req) + resp, err := in.service.PortForward(req) recordError(operation, err) return resp, err } -func (in instrumentedRuntimeService) UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) error { +func (in instrumentedRuntimeService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error { const operation = "update_runtime_config" defer recordOperation(operation, time.Now()) - err := in.service.UpdateRuntimeConfig(ctx, runtimeConfig) + err := in.service.UpdateRuntimeConfig(runtimeConfig) recordError(operation, err) return err } -func (in instrumentedImageManagerService) ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { +func (in instrumentedImageManagerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { const operation = "list_images" defer recordOperation(operation, time.Now()) - out, err := in.service.ListImages(ctx, filter) + out, err := in.service.ListImages(filter) recordError(operation, err) return out, err } -func (in instrumentedImageManagerService) ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) 
(*runtimeapi.ImageStatusResponse, error) { +func (in instrumentedImageManagerService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) { const operation = "image_status" defer recordOperation(operation, time.Now()) - out, err := in.service.ImageStatus(ctx, image, verbose) + out, err := in.service.ImageStatus(image, verbose) recordError(operation, err) return out, err } -func (in instrumentedImageManagerService) PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (in instrumentedImageManagerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { const operation = "pull_image" defer recordOperation(operation, time.Now()) - imageRef, err := in.service.PullImage(ctx, image, auth, podSandboxConfig) + imageRef, err := in.service.PullImage(image, auth, podSandboxConfig) recordError(operation, err) return imageRef, err } -func (in instrumentedImageManagerService) RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) error { +func (in instrumentedImageManagerService) RemoveImage(image *runtimeapi.ImageSpec) error { const operation = "remove_image" defer recordOperation(operation, time.Now()) - err := in.service.RemoveImage(ctx, image) + err := in.service.RemoveImage(image) recordError(operation, err) return err } -func (in instrumentedImageManagerService) ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (in instrumentedImageManagerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { const operation = "image_fs_info" defer recordOperation(operation, time.Now()) - fsInfo, err := in.service.ImageFsInfo(ctx) + fsInfo, err := in.service.ImageFsInfo() recordError(operation, err) return fsInfo, nil } -func (in instrumentedRuntimeService) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error { +func (in instrumentedRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { const operation = "checkpoint_container" defer recordOperation(operation, time.Now()) - err := in.service.CheckpointContainer(ctx, options) + err := in.service.CheckpointContainer(options) recordError(operation, err) return err } diff --git a/pkg/kubelet/kuberuntime/instrumented_services_test.go b/pkg/kubelet/kuberuntime/instrumented_services_test.go index 75e8fa1da8b..e586905a766 100644 --- a/pkg/kubelet/kuberuntime/instrumented_services_test.go +++ b/pkg/kubelet/kuberuntime/instrumented_services_test.go @@ -17,7 +17,6 @@ limitations under the License. 
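The instrumented_services.go changes above keep the same decorator shape throughout: each wrapper method records an operation duration and, on error, bumps an error counter before returning the underlying result. A compact sketch of that wrapper pattern; the metrics struct and failingRuntime are hypothetical, not the kubelet's Prometheus metrics.

package main

import (
	"errors"
	"fmt"
	"time"
)

type runtimeService interface {
	StartContainer(containerID string) error
}

type metrics struct {
	durations map[string]time.Duration
	errors    map[string]int
}

func (m *metrics) recordOperation(op string, start time.Time) {
	m.durations[op] += time.Since(start)
}

func (m *metrics) recordError(op string, err error) {
	if err != nil {
		m.errors[op]++
	}
}

// instrumentedRuntimeService decorates another runtimeService with metrics.
type instrumentedRuntimeService struct {
	service runtimeService
	m       *metrics
}

func (in instrumentedRuntimeService) StartContainer(containerID string) error {
	const operation = "start_container"
	// time.Now() is captured when the defer is declared, so the full call is timed.
	defer in.m.recordOperation(operation, time.Now())

	err := in.service.StartContainer(containerID)
	in.m.recordError(operation, err)
	return err
}

type failingRuntime struct{}

func (failingRuntime) StartContainer(string) error { return errors.New("boom") }

func main() {
	m := &metrics{durations: map[string]time.Duration{}, errors: map[string]int{}}
	irs := instrumentedRuntimeService{service: failingRuntime{}, m: m}
	_ = irs.StartContainer("abc")
	fmt.Println("errors:", m.errors["start_container"])
}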
package kuberuntime import ( - "context" "net" "net/http" "testing" @@ -71,16 +70,14 @@ func TestRecordOperation(t *testing.T) { } func TestInstrumentedVersion(t *testing.T) { - ctx := context.Background() fakeRuntime, _, _, _ := createTestRuntimeManager() irs := newInstrumentedRuntimeService(fakeRuntime) - vr, err := irs.Version(ctx, "1") + vr, err := irs.Version("1") assert.NoError(t, err) assert.Equal(t, kubeRuntimeAPIVersion, vr.Version) } func TestStatus(t *testing.T) { - ctx := context.Background() fakeRuntime, _, _, _ := createTestRuntimeManager() fakeRuntime.FakeStatus = &runtimeapi.RuntimeStatus{ Conditions: []*runtimeapi.RuntimeCondition{ @@ -89,7 +86,7 @@ func TestStatus(t *testing.T) { }, } irs := newInstrumentedRuntimeService(fakeRuntime) - actural, err := irs.Status(ctx, false) + actural, err := irs.Status(false) assert.NoError(t, err) expected := &runtimeapi.RuntimeStatus{ Conditions: []*runtimeapi.RuntimeCondition{ diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container.go b/pkg/kubelet/kuberuntime/kuberuntime_container.go index 9c042bc0284..141973837d2 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container.go @@ -170,11 +170,11 @@ func calcRestartCountByLogDir(path string) (int, error) { // * create the container // * start the container // * run the post start lifecycle hooks (if applicable) -func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) { +func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeapi.PodSandboxConfig, spec *startSpec, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string, podIPs []string) (string, error) { container := spec.container // Step 1: pull the image. 
- imageRef, msg, err := m.imagePuller.EnsureImageExists(ctx, pod, container, pullSecrets, podSandboxConfig) + imageRef, msg, err := m.imagePuller.EnsureImageExists(pod, container, pullSecrets, podSandboxConfig) if err != nil { s, _ := grpcstatus.FromError(err) m.recordContainerEvent(pod, container, "", v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message()) @@ -212,7 +212,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb return s.Message(), ErrCreateContainerConfig } - containerConfig, cleanupAction, err := m.generateContainerConfig(ctx, container, pod, restartCount, podIP, imageRef, podIPs, target) + containerConfig, cleanupAction, err := m.generateContainerConfig(container, pod, restartCount, podIP, imageRef, podIPs, target) if cleanupAction != nil { defer cleanupAction() } @@ -229,7 +229,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb return s.Message(), ErrPreCreateHook } - containerID, err := m.runtimeService.CreateContainer(ctx, podSandboxID, containerConfig, podSandboxConfig) + containerID, err := m.runtimeService.CreateContainer(podSandboxID, containerConfig, podSandboxConfig) if err != nil { s, _ := grpcstatus.FromError(err) m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToCreateContainer, "Error: %v", s.Message()) @@ -244,7 +244,7 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb m.recordContainerEvent(pod, container, containerID, v1.EventTypeNormal, events.CreatedContainer, fmt.Sprintf("Created container %s", container.Name)) // Step 3: start the container. - err = m.runtimeService.StartContainer(ctx, containerID) + err = m.runtimeService.StartContainer(containerID) if err != nil { s, _ := grpcstatus.FromError(err) m.recordContainerEvent(pod, container, containerID, v1.EventTypeWarning, events.FailedToStartContainer, "Error: %v", s.Message()) @@ -277,13 +277,13 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb Type: m.runtimeName, ID: containerID, } - msg, handlerErr := m.runner.Run(ctx, kubeContainerID, pod, container, container.Lifecycle.PostStart) + msg, handlerErr := m.runner.Run(kubeContainerID, pod, container, container.Lifecycle.PostStart) if handlerErr != nil { klog.ErrorS(handlerErr, "Failed to execute PostStartHook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String()) // do not record the message in the event so that secrets won't leak from the server. m.recordContainerEvent(pod, container, kubeContainerID.ID, v1.EventTypeWarning, events.FailedPostStartHook, "PostStartHook failed") - if err := m.killContainer(ctx, pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil); err != nil { + if err := m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", reasonFailedPostStartHook, nil); err != nil { klog.ErrorS(err, "Failed to kill container", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "containerID", kubeContainerID.String()) } @@ -295,13 +295,13 @@ func (m *kubeGenericRuntimeManager) startContainer(ctx context.Context, podSandb } // generateContainerConfig generates container config for kubelet runtime v1. 
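startContainer above follows a fixed order: pull the image, generate the config, create and start the container, then run the post-start hook and kill the container if the hook fails. A simplified sketch of that sequence with stand-in containerRuntime and hookRunner interfaces (the config-generation step is folded into CreateContainer here).

package main

import "fmt"

type containerRuntime interface {
	PullImage(image string) (string, error)
	CreateContainer(sandboxID, imageRef string) (string, error)
	StartContainer(id string) error
	StopContainer(id string) error
}

type hookRunner interface {
	RunPostStart(containerID string) error
}

// startContainer mirrors the step ordering of the hunk above.
func startContainer(r containerRuntime, hooks hookRunner, sandboxID, image string) (string, error) {
	// Step 1: pull the image.
	imageRef, err := r.PullImage(image)
	if err != nil {
		return "", fmt.Errorf("pull image: %w", err)
	}
	// Step 2: create the container from the pulled image.
	id, err := r.CreateContainer(sandboxID, imageRef)
	if err != nil {
		return "", fmt.Errorf("create container: %w", err)
	}
	// Step 3: start the container.
	if err := r.StartContainer(id); err != nil {
		return "", fmt.Errorf("start container: %w", err)
	}
	// Step 4: run the post-start hook; kill the container if the hook fails.
	if err := hooks.RunPostStart(id); err != nil {
		_ = r.StopContainer(id) // best effort, as the original logs and continues
		return "", fmt.Errorf("post-start hook: %w", err)
	}
	return id, nil
}

type fakeRuntime struct{}

func (fakeRuntime) PullImage(image string) (string, error)      { return image + "@sha256:fake", nil }
func (fakeRuntime) CreateContainer(_, _ string) (string, error) { return "c-1", nil }
func (fakeRuntime) StartContainer(string) error                 { return nil }
func (fakeRuntime) StopContainer(string) error                  { return nil }

type noopHooks struct{}

func (noopHooks) RunPostStart(string) error { return nil }

func main() {
	id, err := startContainer(fakeRuntime{}, noopHooks{}, "sandbox-1", "busybox")
	fmt.Println(id, err)
}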
-func (m *kubeGenericRuntimeManager) generateContainerConfig(ctx context.Context, container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) { - opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, podIPs) +func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP, imageRef string, podIPs []string, nsTarget *kubecontainer.ContainerID) (*runtimeapi.ContainerConfig, func(), error) { + opts, cleanupAction, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, podIPs) if err != nil { return nil, nil, err } - uid, username, err := m.getImageUser(ctx, container.Image) + uid, username, err := m.getImageUser(container.Image) if err != nil { return nil, cleanupAction, err } @@ -432,7 +432,7 @@ func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerO // getKubeletContainers lists containers managed by kubelet. // The boolean parameter specifies whether returns all containers including // those already exited and dead containers (used for garbage collection). -func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, allContainers bool) ([]*runtimeapi.Container, error) { +func (m *kubeGenericRuntimeManager) getKubeletContainers(allContainers bool) ([]*runtimeapi.Container, error) { filter := &runtimeapi.ContainerFilter{} if !allContainers { filter.State = &runtimeapi.ContainerStateValue{ @@ -440,7 +440,7 @@ func (m *kubeGenericRuntimeManager) getKubeletContainers(ctx context.Context, al } } - containers, err := m.runtimeService.ListContainers(ctx, filter) + containers, err := m.runtimeService.ListContainers(filter) if err != nil { klog.ErrorS(err, "ListContainers failed") return nil, err @@ -491,9 +491,9 @@ func (m *kubeGenericRuntimeManager) readLastStringFromContainerLogs(path string) } // getPodContainerStatuses gets all containers' statuses for the pod. -func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) { +func (m *kubeGenericRuntimeManager) getPodContainerStatuses(uid kubetypes.UID, name, namespace string) ([]*kubecontainer.Status, error) { // Select all containers of the given pod. - containers, err := m.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{ + containers, err := m.runtimeService.ListContainers(&runtimeapi.ContainerFilter{ LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(uid)}, }) if err != nil { @@ -504,7 +504,7 @@ func (m *kubeGenericRuntimeManager) getPodContainerStatuses(ctx context.Context, statuses := []*kubecontainer.Status{} // TODO: optimization: set maximum number of containers per container name to examine. for _, c := range containers { - resp, err := m.runtimeService.ContainerStatus(ctx, c.Id, false) + resp, err := m.runtimeService.ContainerStatus(c.Id, false) // Between List (ListContainers) and check (ContainerStatus) another thread might remove a container, and that is normal. // The previous call (ListContainers) never fails due to a pod container not existing. 
// Therefore, this method should not either, but instead act as if the previous call failed, @@ -579,7 +579,7 @@ func toKubeContainerStatus(status *runtimeapi.ContainerStatus, runtimeName strin } // executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes. -func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { +func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 { klog.V(3).InfoS("Running preStop hook", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String()) start := metav1.Now() @@ -587,7 +587,7 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod go func() { defer close(done) defer utilruntime.HandleCrash() - if _, err := m.runner.Run(ctx, containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { + if _, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil { klog.ErrorS(err, "PreStop hook failed", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerSpec.Name, "containerID", containerID.String()) // do not record the message in the event so that secrets won't leak from the server. @@ -615,10 +615,10 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(ctx context.Context, pod // TODO(random-liu): Add a node e2e test to test this behaviour. // TODO(random-liu): Change the lifecycle handler to just accept information needed, so that we can // just pass the needed function not create the fake object. -func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context.Context, containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) { +func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) { var pod *v1.Pod var container *v1.Container - resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false) + resp, err := m.runtimeService.ContainerStatus(containerID.ID, false) if err != nil { return nil, nil, err } @@ -658,7 +658,7 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(ctx context. // killContainer kills a container through the following steps: // * Run the pre-stop lifecycle hooks (if applicable). // * Stop the container. -func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64) error { +func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, message string, reason containerKillReason, gracePeriodOverride *int64) error { var containerSpec *v1.Container if pod != nil { if containerSpec = kubecontainer.GetContainerSpec(pod, containerName); containerSpec == nil { @@ -667,7 +667,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P } } else { // Restore necessary information if one of the specs is nil. 
- restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(ctx, containerID) + restoredPod, restoredContainer, err := m.restoreSpecsFromContainerLabels(containerID) if err != nil { return err } @@ -689,7 +689,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P // Run the pre-stop lifecycle hooks if applicable and if there is enough time to run it if containerSpec.Lifecycle != nil && containerSpec.Lifecycle.PreStop != nil && gracePeriod > 0 { - gracePeriod = gracePeriod - m.executePreStopHook(ctx, pod, containerID, containerSpec, gracePeriod) + gracePeriod = gracePeriod - m.executePreStopHook(pod, containerID, containerSpec, gracePeriod) } // always give containers a minimal shutdown window to avoid unnecessary SIGKILLs if gracePeriod < minimumGracePeriodInSeconds { @@ -704,7 +704,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P klog.V(2).InfoS("Killing container with a grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod) - err := m.runtimeService.StopContainer(ctx, containerID.ID, gracePeriod) + err := m.runtimeService.StopContainer(containerID.ID, gracePeriod) if err != nil && !crierror.IsNotFound(err) { klog.ErrorS(err, "Container termination failed with gracePeriod", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "gracePeriod", gracePeriod) @@ -717,7 +717,7 @@ func (m *kubeGenericRuntimeManager) killContainer(ctx context.Context, pod *v1.P } // killContainersWithSyncResult kills all pod's containers with sync results. -func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) { +func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) { containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers)) wg := sync.WaitGroup{} @@ -728,7 +728,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con defer wg.Done() killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, container.Name) - if err := m.killContainer(ctx, pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil { + if err := m.killContainer(pod, container.ID, container.Name, "", reasonUnknown, gracePeriodOverride); err != nil { killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) // Use runningPod for logging as the pod passed in could be *nil*. klog.ErrorS(err, "Kill container failed", "pod", klog.KRef(runningPod.Namespace, runningPod.Name), "podUID", runningPod.ID, @@ -750,7 +750,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con // containers, we have reduced the number of outstanding init containers still // present. This reduces load on the container garbage collector by only // preserving the most recent terminated init container. 
-func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) { +func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus) { // only the last execution of each init container should be preserved, and only preserve it if it is in the // list of init containers to keep. initContainerNames := sets.NewString() @@ -775,7 +775,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.C } // prune all other init containers that match this container name klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count) - if err := m.removeContainer(ctx, status.ID.ID); err != nil { + if err := m.removeContainer(status.ID.ID); err != nil { utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) continue } @@ -786,7 +786,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(ctx context.C // Remove all init containers. Note that this function does not check the state // of the container because it assumes all init containers have been stopped // before the call happens. -func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) { +func (m *kubeGenericRuntimeManager) purgeInitContainers(pod *v1.Pod, podStatus *kubecontainer.PodStatus) { initContainerNames := sets.NewString() for _, container := range pod.Spec.InitContainers { initContainerNames.Insert(container.Name) @@ -800,7 +800,7 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod count++ // Purge all init containers that match this container name klog.V(4).InfoS("Removing init container", "containerName", status.Name, "containerID", status.ID.ID, "count", count) - if err := m.removeContainer(ctx, status.ID.ID); err != nil { + if err := m.removeContainer(status.ID.ID); err != nil { utilruntime.HandleError(fmt.Errorf("failed to remove pod init container %q: %v; Skipping pod %q", status.Name, err, format.Pod(pod))) continue } @@ -867,7 +867,7 @@ func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) // GetContainerLogs returns logs of a specific container. func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) { - resp, err := m.runtimeService.ContainerStatus(ctx, containerID.ID, false) + resp, err := m.runtimeService.ContainerStatus(containerID.ID, false) if err != nil { klog.V(4).InfoS("Failed to get container status", "containerID", containerID.String(), "err", err) return fmt.Errorf("unable to retrieve container logs for %v", containerID.String()) @@ -880,7 +880,7 @@ func (m *kubeGenericRuntimeManager) GetContainerLogs(ctx context.Context, pod *v } // GetExec gets the endpoint the runtime will serve the exec request from. 
-func (m *kubeGenericRuntimeManager) GetExec(ctx context.Context, id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (m *kubeGenericRuntimeManager) GetExec(id kubecontainer.ContainerID, cmd []string, stdin, stdout, stderr, tty bool) (*url.URL, error) { req := &runtimeapi.ExecRequest{ ContainerId: id.ID, Cmd: cmd, @@ -889,7 +889,7 @@ func (m *kubeGenericRuntimeManager) GetExec(ctx context.Context, id kubecontaine Stdout: stdout, Stderr: stderr, } - resp, err := m.runtimeService.Exec(ctx, req) + resp, err := m.runtimeService.Exec(req) if err != nil { return nil, err } @@ -898,7 +898,7 @@ func (m *kubeGenericRuntimeManager) GetExec(ctx context.Context, id kubecontaine } // GetAttach gets the endpoint the runtime will serve the attach request from. -func (m *kubeGenericRuntimeManager) GetAttach(ctx context.Context, id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { +func (m *kubeGenericRuntimeManager) GetAttach(id kubecontainer.ContainerID, stdin, stdout, stderr, tty bool) (*url.URL, error) { req := &runtimeapi.AttachRequest{ ContainerId: id.ID, Stdin: stdin, @@ -906,7 +906,7 @@ func (m *kubeGenericRuntimeManager) GetAttach(ctx context.Context, id kubecontai Stderr: stderr, Tty: tty, } - resp, err := m.runtimeService.Attach(ctx, req) + resp, err := m.runtimeService.Attach(req) if err != nil { return nil, err } @@ -914,8 +914,8 @@ func (m *kubeGenericRuntimeManager) GetAttach(ctx context.Context, id kubecontai } // RunInContainer synchronously executes the command in the container, and returns the output. -func (m *kubeGenericRuntimeManager) RunInContainer(ctx context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { - stdout, stderr, err := m.runtimeService.ExecSync(ctx, id.ID, cmd, timeout) +func (m *kubeGenericRuntimeManager) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { + stdout, stderr, err := m.runtimeService.ExecSync(id.ID, cmd, timeout) // NOTE(tallclair): This does not correctly interleave stdout & stderr, but should be sufficient // for logging purposes. A combined output option will need to be added to the ExecSyncRequest // if more precise output ordering is ever required. @@ -928,7 +928,7 @@ func (m *kubeGenericRuntimeManager) RunInContainer(ctx context.Context, id kubec // that container logs to be removed with the container. // Notice that we assume that the container should only be removed in non-running state, and // it will not write container logs anymore in that state. -func (m *kubeGenericRuntimeManager) removeContainer(ctx context.Context, containerID string) error { +func (m *kubeGenericRuntimeManager) removeContainer(containerID string) error { klog.V(4).InfoS("Removing container", "containerID", containerID) // Call internal container post-stop lifecycle hook. if err := m.internalLifecycle.PostStopContainer(containerID); err != nil { @@ -937,22 +937,22 @@ func (m *kubeGenericRuntimeManager) removeContainer(ctx context.Context, contain // Remove the container log. // TODO: Separate log and container lifecycle management. - if err := m.removeContainerLog(ctx, containerID); err != nil { + if err := m.removeContainerLog(containerID); err != nil { return err } // Remove the container. - return m.runtimeService.RemoveContainer(ctx, containerID) + return m.runtimeService.RemoveContainer(containerID) } // removeContainerLog removes the container log. 
-func (m *kubeGenericRuntimeManager) removeContainerLog(ctx context.Context, containerID string) error { +func (m *kubeGenericRuntimeManager) removeContainerLog(containerID string) error { // Use log manager to remove rotated logs. - err := m.logManager.Clean(ctx, containerID) + err := m.logManager.Clean(containerID) if err != nil { return err } - resp, err := m.runtimeService.ContainerStatus(ctx, containerID, false) + resp, err := m.runtimeService.ContainerStatus(containerID, false) if err != nil { return fmt.Errorf("failed to get container status %q: %v", containerID, err) } @@ -973,8 +973,8 @@ func (m *kubeGenericRuntimeManager) removeContainerLog(ctx context.Context, cont } // DeleteContainer removes a container. -func (m *kubeGenericRuntimeManager) DeleteContainer(ctx context.Context, containerID kubecontainer.ContainerID) error { - return m.removeContainer(ctx, containerID.ID) +func (m *kubeGenericRuntimeManager) DeleteContainer(containerID kubecontainer.ContainerID) error { + return m.removeContainer(containerID.ID) } // setTerminationGracePeriod determines the grace period to use when killing a container diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go index d47a94377e0..a13bff316e7 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go @@ -20,7 +20,6 @@ limitations under the License. package kuberuntime import ( - "context" "reflect" "strconv" "testing" @@ -40,11 +39,10 @@ import ( ) func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig { - ctx := context.Background() container := &pod.Spec.Containers[containerIndex] podIP := "" restartCount := 0 - opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, []string{podIP}) + opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP, []string{podIP}) containerLogsPath := buildContainerLogsPath(container.Name, restartCount) restartCountUint32 := uint32(restartCount) envs := make([]*runtimeapi.KeyValue, len(opts.Envs)) @@ -75,7 +73,6 @@ func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerInde } func TestGenerateContainerConfig(t *testing.T) { - ctx := context.Background() _, imageService, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -105,7 +102,7 @@ func TestGenerateContainerConfig(t *testing.T) { } expectedConfig := makeExpectedConfig(m, pod, 0, false) - containerConfig, _, err := m.generateContainerConfig(ctx, &pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil) + containerConfig, _, err := m.generateContainerConfig(&pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil) assert.NoError(t, err) assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.") assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set") @@ -136,11 +133,11 @@ func TestGenerateContainerConfig(t *testing.T) { }, } - _, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) + _, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", 
podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) assert.Error(t, err) - imageID, _ := imageService.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) - resp, _ := imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: imageID}, false) + imageID, _ := imageService.PullImage(&runtimeapi.ImageSpec{Image: "busybox"}, nil, nil) + resp, _ := imageService.ImageStatus(&runtimeapi.ImageSpec{Image: imageID}, false) resp.Image.Uid = nil resp.Image.Username = "test" @@ -148,7 +145,7 @@ func TestGenerateContainerConfig(t *testing.T) { podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue - _, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) + _, _, err = m.generateContainerConfig(&podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil) assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username") } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go index 4815369c7dd..9e2993d4ee1 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_container_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_container_test.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "os" "path/filepath" "regexp" @@ -44,7 +43,6 @@ import ( // TestRemoveContainer tests removing the container and its corresponding container logs. func TestRemoveContainer(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() require.NoError(t, err) pod := &v1.Pod{ @@ -82,7 +80,7 @@ func TestRemoveContainer(t *testing.T) { fakeOS.Create(expectedContainerLogPath) fakeOS.Create(expectedContainerLogPathRotated) - err = m.removeContainer(ctx, containerID) + err = m.removeContainer(containerID) assert.NoError(t, err) // Verify container log is removed. 
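The TestRemoveContainer hunks above exercise removeContainer, whose reverted body (earlier in the kuberuntime_container.go hunks) runs the internal post-stop hook, removes the container log, and then calls the runtime's RemoveContainer, all without a ctx argument. A minimal, self-contained sketch of that ordering, using hypothetical trimmed-down interfaces rather than the kubelet's real types:

package main

import "fmt"

// Hypothetical, trimmed-down stand-ins for the interfaces touched by this patch;
// the real kubelet types carry many more methods and, before this revert, a
// context.Context parameter.
type internalLifecycle interface {
	PostStopContainer(containerID string) error
}

type logManager interface {
	Clean(containerID string) error
}

type runtimeService interface {
	RemoveContainer(containerID string) error
}

type manager struct {
	lifecycle internalLifecycle
	logs      logManager
	runtime   runtimeService
}

// removeContainer mirrors the ordering shown in the kuberuntime_container.go hunk:
// post-stop hook first, then log cleanup, then the runtime's RemoveContainer call,
// with no ctx threaded through.
func (m *manager) removeContainer(containerID string) error {
	if err := m.lifecycle.PostStopContainer(containerID); err != nil {
		return err
	}
	if err := m.logs.Clean(containerID); err != nil {
		return err
	}
	return m.runtime.RemoveContainer(containerID)
}

// No-op fakes so the sketch runs standalone.
type noopHook struct{}

func (noopHook) PostStopContainer(string) error { return nil }

type noopLogs struct{}

func (noopLogs) Clean(string) error { return nil }

type noopRuntime struct{}

func (noopRuntime) RemoveContainer(string) error { return nil }

func main() {
	m := &manager{lifecycle: noopHook{}, logs: noopLogs{}, runtime: noopRuntime{}}
	fmt.Println(m.removeContainer("example-container-id"))
}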
@@ -92,7 +90,7 @@ func TestRemoveContainer(t *testing.T) { fakeOS.Removes) // Verify container is removed assert.Contains(t, fakeRuntime.Called, "RemoveContainer") - containers, err := fakeRuntime.ListContainers(ctx, &runtimeapi.ContainerFilter{Id: containerID}) + containers, err := fakeRuntime.ListContainers(&runtimeapi.ContainerFilter{Id: containerID}) assert.NoError(t, err) assert.Empty(t, containers) } @@ -125,8 +123,7 @@ func TestKillContainer(t *testing.T) { } for _, test := range tests { - ctx := context.Background() - err := m.killContainer(ctx, test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride) + err := m.killContainer(test.pod, test.containerID, test.containerName, test.reason, "", &test.gracePeriodOverride) if test.succeed != (err == nil) { t.Errorf("%s: expected %v, got %v (%v)", test.caseName, test.succeed, (err == nil), err) } @@ -306,9 +303,8 @@ func TestLifeCycleHook(t *testing.T) { // Configured and works as expected t.Run("PreStop-CMDExec", func(t *testing.T) { - ctx := context.Background() testPod.Spec.Containers[0].Lifecycle = cmdLifeCycle - m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod) + m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod) if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] { t.Errorf("CMD Prestop hook was not invoked") } @@ -317,23 +313,21 @@ func TestLifeCycleHook(t *testing.T) { // Configured and working HTTP hook t.Run("PreStop-HTTPGet", func(t *testing.T) { t.Run("inconsistent", func(t *testing.T) { - ctx := context.Background() defer func() { fakeHTTP.req = nil }() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)() httpLifeCycle.PreStop.HTTPGet.Port = intstr.IntOrString{} testPod.Spec.Containers[0].Lifecycle = httpLifeCycle - m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod) + m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod) if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) { t.Errorf("HTTP Prestop hook was not invoked") } }) t.Run("consistent", func(t *testing.T) { - ctx := context.Background() defer func() { fakeHTTP.req = nil }() httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt(80) testPod.Spec.Containers[0].Lifecycle = httpLifeCycle - m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod) + m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriod) if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) { t.Errorf("HTTP Prestop hook was not invoked") @@ -343,13 +337,12 @@ func TestLifeCycleHook(t *testing.T) { // When there is no time to run PreStopHook t.Run("PreStop-NoTimeToRun", func(t *testing.T) { - ctx := context.Background() gracePeriodLocal := int64(0) testPod.DeletionGracePeriodSeconds = &gracePeriodLocal testPod.Spec.TerminationGracePeriodSeconds = &gracePeriodLocal - m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriodLocal) + m.killContainer(testPod, cID, "foo", "testKill", "", &gracePeriodLocal) if fakeHTTP.req != nil { t.Errorf("HTTP Prestop hook Should not execute when gracePeriod is 0") @@ -358,7 +351,7 @@ func TestLifeCycleHook(t *testing.T) { // Post Start script t.Run("PostStart-CmdExe", func(t *testing.T) { - ctx := context.Background() + // Fake all the things you need before trying to create a container fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, 
testPod) fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0) @@ -379,7 +372,7 @@ func TestLifeCycleHook(t *testing.T) { } // Now try to create a container, which should in turn invoke PostStart Hook - _, err := m.startContainer(ctx, fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{}) + _, err := m.startContainer(fakeSandBox.Id, fakeSandBoxConfig, containerStartSpec(testContainer), testPod, fakePodStatus, nil, "", []string{}) if err != nil { t.Errorf("startContainer error =%v", err) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc.go b/pkg/kubelet/kuberuntime/kuberuntime_gc.go index eba6159c79e..6e676ffef82 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "fmt" "os" "path/filepath" @@ -112,18 +111,18 @@ func (a sandboxByCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a sandboxByCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) } // enforceMaxContainersPerEvictUnit enforces MaxPerPodContainer for each evictUnit. -func (cgc *containerGC) enforceMaxContainersPerEvictUnit(ctx context.Context, evictUnits containersByEvictUnit, MaxContainers int) { +func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) { for key := range evictUnits { toRemove := len(evictUnits[key]) - MaxContainers if toRemove > 0 { - evictUnits[key] = cgc.removeOldestN(ctx, evictUnits[key], toRemove) + evictUnits[key] = cgc.removeOldestN(evictUnits[key], toRemove) } } } // removeOldestN removes the oldest toRemove containers and returns the resulting slice. -func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containerGCInfo, toRemove int) []containerGCInfo { +func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo { // Remove from oldest to newest (last to first). numToKeep := len(containers) - toRemove if numToKeep > 0 { @@ -138,12 +137,12 @@ func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containe ID: containers[i].id, } message := "Container is in unknown state, try killing it before removal" - if err := cgc.manager.killContainer(ctx, nil, id, containers[i].name, message, reasonUnknown, nil); err != nil { + if err := cgc.manager.killContainer(nil, id, containers[i].name, message, reasonUnknown, nil); err != nil { klog.ErrorS(err, "Failed to stop container", "containerID", containers[i].id) continue } } - if err := cgc.manager.removeContainer(ctx, containers[i].id); err != nil { + if err := cgc.manager.removeContainer(containers[i].id); err != nil { klog.ErrorS(err, "Failed to remove container", "containerID", containers[i].id) } } @@ -154,7 +153,7 @@ func (cgc *containerGC) removeOldestN(ctx context.Context, containers []containe // removeOldestNSandboxes removes the oldest inactive toRemove sandboxes and // returns the resulting slice. -func (cgc *containerGC) removeOldestNSandboxes(ctx context.Context, sandboxes []sandboxGCInfo, toRemove int) { +func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemove int) { numToKeep := len(sandboxes) - toRemove if numToKeep > 0 { sort.Sort(sandboxByCreated(sandboxes)) @@ -162,30 +161,30 @@ func (cgc *containerGC) removeOldestNSandboxes(ctx context.Context, sandboxes [] // Remove from oldest to newest (last to first). 
for i := len(sandboxes) - 1; i >= numToKeep; i-- { if !sandboxes[i].active { - cgc.removeSandbox(ctx, sandboxes[i].id) + cgc.removeSandbox(sandboxes[i].id) } } } // removeSandbox removes the sandbox by sandboxID. -func (cgc *containerGC) removeSandbox(ctx context.Context, sandboxID string) { +func (cgc *containerGC) removeSandbox(sandboxID string) { klog.V(4).InfoS("Removing sandbox", "sandboxID", sandboxID) // In normal cases, kubelet should've already called StopPodSandbox before // GC kicks in. To guard against the rare cases where this is not true, try // stopping the sandbox before removing it. - if err := cgc.client.StopPodSandbox(ctx, sandboxID); err != nil { + if err := cgc.client.StopPodSandbox(sandboxID); err != nil { klog.ErrorS(err, "Failed to stop sandbox before removing", "sandboxID", sandboxID) return } - if err := cgc.client.RemovePodSandbox(ctx, sandboxID); err != nil { + if err := cgc.client.RemovePodSandbox(sandboxID); err != nil { klog.ErrorS(err, "Failed to remove sandbox", "sandboxID", sandboxID) } } // evictableContainers gets all containers that are evictable. Evictable containers are: not running // and created more than MinAge ago. -func (cgc *containerGC) evictableContainers(ctx context.Context, minAge time.Duration) (containersByEvictUnit, error) { - containers, err := cgc.manager.getKubeletContainers(ctx, true) +func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, error) { + containers, err := cgc.manager.getKubeletContainers(true) if err != nil { return containersByEvictUnit{}, err } @@ -221,9 +220,9 @@ func (cgc *containerGC) evictableContainers(ctx context.Context, minAge time.Dur } // evict all containers that are evictable -func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { +func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { // Separate containers by evict units. - evictUnits, err := cgc.evictableContainers(ctx, gcPolicy.MinAge) + evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge) if err != nil { return err } @@ -232,7 +231,7 @@ func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontai if allSourcesReady { for key, unit := range evictUnits { if cgc.podStateProvider.ShouldPodContentBeRemoved(key.uid) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(key.uid)) { - cgc.removeOldestN(ctx, unit, len(unit)) // Remove all. + cgc.removeOldestN(unit, len(unit)) // Remove all. delete(evictUnits, key) } } @@ -240,7 +239,7 @@ func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontai // Enforce max containers per evict unit. if gcPolicy.MaxPerPodContainer >= 0 { - cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, gcPolicy.MaxPerPodContainer) + cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer) } // Enforce max total number of containers. @@ -250,7 +249,7 @@ func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontai if numContainersPerEvictUnit < 1 { numContainersPerEvictUnit = 1 } - cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, numContainersPerEvictUnit) + cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit) // If we still need to evict, evict oldest first. 
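// Illustrative arithmetic for the oldest-first pass below (hypothetical numbers,
// not taken from this patch): with gcPolicy.MaxContainers = 3 and five evict units
// holding one dead container each, the per-unit cap above (in upstream kubelet it is
// gcPolicy.MaxContainers divided by the number of evict units, clamped to at least 1)
// becomes 1 and still leaves five containers, so the flattened, byCreated-sorted list
// is trimmed by removeOldestN(flattened, 5-3), i.e. the two oldest are removed.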
numContainers := evictUnits.NumContainers() @@ -261,7 +260,7 @@ func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontai } sort.Sort(byCreated(flattened)) - cgc.removeOldestN(ctx, flattened, numContainers-gcPolicy.MaxContainers) + cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers) } } return nil @@ -273,13 +272,13 @@ func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontai // 2. contains no containers. // 3. belong to a non-existent (i.e., already removed) pod, or is not the // most recently created sandbox for the pod. -func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods bool) error { - containers, err := cgc.manager.getKubeletContainers(ctx, true) +func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error { + containers, err := cgc.manager.getKubeletContainers(true) if err != nil { return err } - sandboxes, err := cgc.manager.getKubeletSandboxes(ctx, true) + sandboxes, err := cgc.manager.getKubeletSandboxes(true) if err != nil { return err } @@ -316,10 +315,10 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods // Remove all evictable sandboxes if the pod has been removed. // Note that the latest dead sandbox is also removed if there is // already an active one. - cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes)) + cgc.removeOldestNSandboxes(sandboxes, len(sandboxes)) } else { // Keep latest one if the pod still exists. - cgc.removeOldestNSandboxes(ctx, sandboxes, len(sandboxes)-1) + cgc.removeOldestNSandboxes(sandboxes, len(sandboxes)-1) } } return nil @@ -327,7 +326,7 @@ func (cgc *containerGC) evictSandboxes(ctx context.Context, evictNonDeletedPods // evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories // are evictable if there are no corresponding pods. -func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesReady bool) error { +func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error { osInterface := cgc.manager.osInterface if allSourcesReady { // Only remove pod logs directories when all sources are ready. @@ -355,7 +354,7 @@ func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesR for _, logSymlink := range logSymlinks { if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) { if containerID, err := getContainerIDFromLegacyLogSymlink(logSymlink); err == nil { - resp, err := cgc.manager.runtimeService.ContainerStatus(ctx, containerID, false) + resp, err := cgc.manager.runtimeService.ContainerStatus(containerID, false) if err != nil { // TODO: we should handle container not found (i.e. container was deleted) case differently // once https://github.com/kubernetes/kubernetes/issues/63336 is resolved @@ -406,20 +405,20 @@ func (cgc *containerGC) evictPodLogsDirectories(ctx context.Context, allSourcesR // * removes oldest dead containers by enforcing gcPolicy.MaxContainers. // * gets evictable sandboxes which are not ready and contains no containers. // * removes evictable sandboxes. 
-func (cgc *containerGC) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { +func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { errors := []error{} // Remove evictable containers - if err := cgc.evictContainers(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil { + if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil { errors = append(errors, err) } // Remove sandboxes with zero containers - if err := cgc.evictSandboxes(ctx, evictNonDeletedPods); err != nil { + if err := cgc.evictSandboxes(evictNonDeletedPods); err != nil { errors = append(errors, err) } // Remove pod sandbox log directory - if err := cgc.evictPodLogsDirectories(ctx, allSourcesReady); err != nil { + if err := cgc.evictPodLogsDirectories(allSourcesReady); err != nil { errors = append(errors, err) } return utilerrors.NewAggregate(errors) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go index 15b292db709..0434a253f7e 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_gc_test.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "os" "path/filepath" "testing" @@ -161,7 +160,6 @@ func TestSandboxGC(t *testing.T) { }, } { t.Run(test.description, func(t *testing.T) { - ctx := context.Background() podStateProvider.removed = make(map[types.UID]struct{}) podStateProvider.terminated = make(map[types.UID]struct{}) fakeSandboxes := makeFakePodSandboxes(t, m, test.sandboxes) @@ -177,13 +175,13 @@ func TestSandboxGC(t *testing.T) { fakeRuntime.SetFakeSandboxes(fakeSandboxes) fakeRuntime.SetFakeContainers(fakeContainers) - err := m.containerGC.evictSandboxes(ctx, test.evictTerminatingPods) + err := m.containerGC.evictSandboxes(test.evictTerminatingPods) assert.NoError(t, err) - realRemain, err := fakeRuntime.ListPodSandbox(ctx, nil) + realRemain, err := fakeRuntime.ListPodSandbox(nil) assert.NoError(t, err) assert.Len(t, realRemain, len(test.remain)) for _, remain := range test.remain { - resp, err := fakeRuntime.PodSandboxStatus(ctx, fakeSandboxes[remain].Id, false) + resp, err := fakeRuntime.PodSandboxStatus(fakeSandboxes[remain].Id, false) assert.NoError(t, err) assert.Equal(t, &fakeSandboxes[remain].PodSandboxStatus, resp.Status) } @@ -389,7 +387,6 @@ func TestContainerGC(t *testing.T) { }, } { t.Run(test.description, func(t *testing.T) { - ctx := context.Background() podStateProvider.removed = make(map[types.UID]struct{}) podStateProvider.terminated = make(map[types.UID]struct{}) fakeContainers := makeFakeContainers(t, m, test.containers) @@ -406,13 +403,13 @@ func TestContainerGC(t *testing.T) { if test.policy == nil { test.policy = &defaultGCPolicy } - err := m.containerGC.evictContainers(ctx, *test.policy, test.allSourcesReady, test.evictTerminatingPods) + err := m.containerGC.evictContainers(*test.policy, test.allSourcesReady, test.evictTerminatingPods) assert.NoError(t, err) - realRemain, err := fakeRuntime.ListContainers(ctx, nil) + realRemain, err := fakeRuntime.ListContainers(nil) assert.NoError(t, err) assert.Len(t, realRemain, len(test.remain)) for _, remain := range test.remain { - resp, err := fakeRuntime.ContainerStatus(ctx, fakeContainers[remain].Id, false) + resp, err := fakeRuntime.ContainerStatus(fakeContainers[remain].Id, false) assert.NoError(t, err) assert.Equal(t, 
&fakeContainers[remain].ContainerStatus, resp.Status) } @@ -422,7 +419,6 @@ func TestContainerGC(t *testing.T) { // Notice that legacy container symlink is not tested since it may be deprecated soon. func TestPodLogDirectoryGC(t *testing.T) { - ctx := context.Background() _, _, m, err := createTestRuntimeManager() assert.NoError(t, err) fakeOS := m.osInterface.(*containertest.FakeOS) @@ -453,19 +449,18 @@ func TestPodLogDirectoryGC(t *testing.T) { } // allSourcesReady == true, pod log directories without corresponding pod should be removed. - err = m.containerGC.evictPodLogsDirectories(ctx, true) + err = m.containerGC.evictPodLogsDirectories(true) assert.NoError(t, err) assert.Equal(t, removed, fakeOS.Removes) // allSourcesReady == false, pod log directories should not be removed. fakeOS.Removes = []string{} - err = m.containerGC.evictPodLogsDirectories(ctx, false) + err = m.containerGC.evictPodLogsDirectories(false) assert.NoError(t, err) assert.Empty(t, fakeOS.Removes) } func TestUnknownStateContainerGC(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -477,13 +472,13 @@ func TestUnknownStateContainerGC(t *testing.T) { }) fakeRuntime.SetFakeContainers(fakeContainers) - err = m.containerGC.evictContainers(ctx, defaultGCPolicy, true, false) + err = m.containerGC.evictContainers(defaultGCPolicy, true, false) assert.NoError(t, err) assert.Contains(t, fakeRuntime.GetCalls(), "StopContainer", "RemoveContainer", "container in unknown state should be stopped before being removed") - remain, err := fakeRuntime.ListContainers(ctx, nil) + remain, err := fakeRuntime.ListContainers(nil) assert.NoError(t, err) assert.Empty(t, remain) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image.go b/pkg/kubelet/kuberuntime/kuberuntime_image.go index 7ae26947701..a52cbaa1a36 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image.go @@ -17,8 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" - v1 "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" @@ -30,7 +28,7 @@ import ( // PullImage pulls an image from the network to local storage using the supplied // secrets if necessary. 
-func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { img := image.Image repoToPull, _, _, err := parsers.ParseImageName(img) if err != nil { @@ -48,7 +46,7 @@ func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecon if !withCredentials { klog.V(3).InfoS("Pulling image without credentials", "image", img) - imageRef, err := m.imageService.PullImage(ctx, imgSpec, nil, podSandboxConfig) + imageRef, err := m.imageService.PullImage(imgSpec, nil, podSandboxConfig) if err != nil { klog.ErrorS(err, "Failed to pull image", "image", img) return "", err @@ -68,7 +66,7 @@ func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecon RegistryToken: currentCreds.RegistryToken, } - imageRef, err := m.imageService.PullImage(ctx, imgSpec, auth, podSandboxConfig) + imageRef, err := m.imageService.PullImage(imgSpec, auth, podSandboxConfig) // If there was no error, return success if err == nil { return imageRef, nil @@ -82,8 +80,8 @@ func (m *kubeGenericRuntimeManager) PullImage(ctx context.Context, image kubecon // GetImageRef gets the ID of the image which has already been in // the local storage. It returns ("", nil) if the image isn't in the local storage. -func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubecontainer.ImageSpec) (string, error) { - resp, err := m.imageService.ImageStatus(ctx, toRuntimeAPIImageSpec(image), false) +func (m *kubeGenericRuntimeManager) GetImageRef(image kubecontainer.ImageSpec) (string, error) { + resp, err := m.imageService.ImageStatus(toRuntimeAPIImageSpec(image), false) if err != nil { klog.ErrorS(err, "Failed to get image status", "image", image.Image) return "", err @@ -95,10 +93,10 @@ func (m *kubeGenericRuntimeManager) GetImageRef(ctx context.Context, image kubec } // ListImages gets all images currently on the machine. -func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubecontainer.Image, error) { +func (m *kubeGenericRuntimeManager) ListImages() ([]kubecontainer.Image, error) { var images []kubecontainer.Image - allImages, err := m.imageService.ListImages(ctx, nil) + allImages, err := m.imageService.ListImages(nil) if err != nil { klog.ErrorS(err, "Failed to list images") return nil, err @@ -118,8 +116,8 @@ func (m *kubeGenericRuntimeManager) ListImages(ctx context.Context) ([]kubeconta } // RemoveImage removes the specified image. -func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubecontainer.ImageSpec) error { - err := m.imageService.RemoveImage(ctx, &runtimeapi.ImageSpec{Image: image.Image}) +func (m *kubeGenericRuntimeManager) RemoveImage(image kubecontainer.ImageSpec) error { + err := m.imageService.RemoveImage(&runtimeapi.ImageSpec{Image: image.Image}) if err != nil { klog.ErrorS(err, "Failed to remove image", "image", image.Image) return err @@ -132,8 +130,8 @@ func (m *kubeGenericRuntimeManager) RemoveImage(ctx context.Context, image kubec // Notice that current logic doesn't really work for images which share layers (e.g. docker image), // this is a known issue, and we'll address this by getting imagefs stats directly from CRI. // TODO: Get imagefs stats directly from CRI. 
-func (m *kubeGenericRuntimeManager) ImageStats(ctx context.Context) (*kubecontainer.ImageStats, error) { - allImages, err := m.imageService.ListImages(ctx, nil) +func (m *kubeGenericRuntimeManager) ImageStats() (*kubecontainer.ImageStats, error) { + allImages, err := m.imageService.ListImages(nil) if err != nil { klog.ErrorS(err, "Failed to list images") return nil, err diff --git a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go index fab378137b2..a6c1adae490 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_image_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_image_test.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "encoding/json" "fmt" "testing" @@ -33,37 +32,34 @@ import ( ) func TestPullImage(t *testing.T) { - ctx := context.Background() _, _, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) - imageRef, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) + imageRef, err := fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) assert.NoError(t, err) assert.Equal(t, "busybox", imageRef) - images, err := fakeManager.ListImages(ctx) + images, err := fakeManager.ListImages() assert.NoError(t, err) assert.Equal(t, 1, len(images)) assert.Equal(t, images[0].RepoTags, []string{"busybox"}) } func TestPullImageWithError(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) fakeImageService.InjectError("PullImage", fmt.Errorf("test-error")) - imageRef, err := fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) + imageRef, err := fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) assert.Error(t, err) assert.Equal(t, "", imageRef) - images, err := fakeManager.ListImages(ctx) + images, err := fakeManager.ListImages() assert.NoError(t, err) assert.Equal(t, 0, len(images)) } func TestListImages(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) @@ -71,7 +67,7 @@ func TestListImages(t *testing.T) { expected := sets.NewString(images...) 
fakeImageService.SetFakeImages(images) - actualImages, err := fakeManager.ListImages(ctx) + actualImages, err := fakeManager.ListImages() assert.NoError(t, err) actual := sets.NewString() for _, i := range actualImages { @@ -82,37 +78,34 @@ func TestListImages(t *testing.T) { } func TestListImagesWithError(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure")) - actualImages, err := fakeManager.ListImages(ctx) + actualImages, err := fakeManager.ListImages() assert.Error(t, err) assert.Nil(t, actualImages) } func TestGetImageRef(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) image := "busybox" fakeImageService.SetFakeImages([]string{image}) - imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image}) + imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image}) assert.NoError(t, err) assert.Equal(t, image, imageRef) } func TestGetImageRefImageNotAvailableLocally(t *testing.T) { - ctx := context.Background() _, _, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) image := "busybox" - imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image}) + imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image}) assert.NoError(t, err) imageNotAvailableLocallyRef := "" @@ -120,7 +113,6 @@ func TestGetImageRefImageNotAvailableLocally(t *testing.T) { } func TestGetImageRefWithError(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) @@ -128,52 +120,48 @@ func TestGetImageRefWithError(t *testing.T) { fakeImageService.InjectError("ImageStatus", fmt.Errorf("test-error")) - imageRef, err := fakeManager.GetImageRef(ctx, kubecontainer.ImageSpec{Image: image}) + imageRef, err := fakeManager.GetImageRef(kubecontainer.ImageSpec{Image: image}) assert.Error(t, err) assert.Equal(t, "", imageRef) } func TestRemoveImage(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) - _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) + _, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) assert.NoError(t, err) assert.Equal(t, 1, len(fakeImageService.Images)) - err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}) + err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"}) assert.NoError(t, err) assert.Equal(t, 0, len(fakeImageService.Images)) } func TestRemoveImageNoOpIfImageNotLocal(t *testing.T) { - ctx := context.Background() _, _, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) - err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}) + err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"}) assert.NoError(t, err) } func TestRemoveImageWithError(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) - _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) + _, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: "busybox"}, nil, nil) assert.NoError(t, err) assert.Equal(t, 1, len(fakeImageService.Images)) 
fakeImageService.InjectError("RemoveImage", fmt.Errorf("test-failure")) - err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}) + err = fakeManager.RemoveImage(kubecontainer.ImageSpec{Image: "busybox"}) assert.Error(t, err) assert.Equal(t, 1, len(fakeImageService.Images)) } func TestImageStats(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) @@ -182,26 +170,24 @@ func TestImageStats(t *testing.T) { images := []string{"1111", "2222", "3333"} fakeImageService.SetFakeImages(images) - actualStats, err := fakeManager.ImageStats(ctx) + actualStats, err := fakeManager.ImageStats() assert.NoError(t, err) expectedStats := &kubecontainer.ImageStats{TotalStorageBytes: imageSize * uint64(len(images))} assert.Equal(t, expectedStats, actualStats) } func TestImageStatsWithError(t *testing.T) { - ctx := context.Background() _, fakeImageService, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) fakeImageService.InjectError("ListImages", fmt.Errorf("test-failure")) - actualImageStats, err := fakeManager.ImageStats(ctx) + actualImageStats, err := fakeManager.ImageStats() assert.Error(t, err) assert.Nil(t, actualImageStats) } func TestPullWithSecrets(t *testing.T) { - ctx := context.Background() // auth value is equivalent to: "username":"passed-user","password":"passed-password" dockerCfg := map[string]map[string]string{"index.docker.io/v1/": {"email": "passed-email", "auth": "cGFzc2VkLXVzZXI6cGFzc2VkLXBhc3N3b3Jk"}} dockercfgContent, err := json.Marshal(dockerCfg) @@ -266,14 +252,13 @@ func TestPullWithSecrets(t *testing.T) { _, fakeImageService, fakeManager, err := customTestRuntimeManager(builtInKeyRing) require.NoError(t, err) - _, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil) + _, err = fakeManager.PullImage(kubecontainer.ImageSpec{Image: test.imageName}, test.passedSecrets, nil) require.NoError(t, err) fakeImageService.AssertImagePulledWithAuth(t, &runtimeapi.ImageSpec{Image: test.imageName, Annotations: make(map[string]string)}, test.expectedAuth, description) } } func TestPullThenListWithAnnotations(t *testing.T) { - ctx := context.Background() _, _, fakeManager, err := createTestRuntimeManager() assert.NoError(t, err) @@ -284,10 +269,10 @@ func TestPullThenListWithAnnotations(t *testing.T) { }, } - _, err = fakeManager.PullImage(ctx, imageSpec, nil, nil) + _, err = fakeManager.PullImage(imageSpec, nil, nil) assert.NoError(t, err) - images, err := fakeManager.ListImages(ctx) + images, err := fakeManager.ListImages() assert.NoError(t, err) assert.Equal(t, 1, len(images)) assert.Equal(t, images[0].Spec, imageSpec) diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index 41a0ced806b..7e245cd00bd 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -17,7 +17,6 @@ limitations under the License. 
package kuberuntime import ( - "context" "errors" "fmt" "os" @@ -196,7 +195,6 @@ func NewKubeGenericRuntimeManager( getNodeAllocatable func() v1.ResourceList, memoryThrottlingFactor float64, ) (KubeGenericRuntime, error) { - ctx := context.Background() runtimeService = newInstrumentedRuntimeService(runtimeService) imageService = newInstrumentedImageManagerService(imageService) kubeRuntimeManager := &kubeGenericRuntimeManager{ @@ -222,7 +220,7 @@ func NewKubeGenericRuntimeManager( memoryThrottlingFactor: memoryThrottlingFactor, } - typedVersion, err := kubeRuntimeManager.getTypedVersion(ctx) + typedVersion, err := kubeRuntimeManager.getTypedVersion() if err != nil { klog.ErrorS(err, "Get runtime version failed") return nil, err @@ -273,7 +271,7 @@ func NewKubeGenericRuntimeManager( kubeRuntimeManager.versionCache = cache.NewObjectCache( func() (interface{}, error) { - return kubeRuntimeManager.getTypedVersion(ctx) + return kubeRuntimeManager.getTypedVersion() }, versionCacheTTL, ) @@ -293,8 +291,8 @@ func newRuntimeVersion(version string) (*utilversion.Version, error) { return utilversion.ParseGeneric(version) } -func (m *kubeGenericRuntimeManager) getTypedVersion(ctx context.Context) (*runtimeapi.VersionResponse, error) { - typedVersion, err := m.runtimeService.Version(ctx, kubeRuntimeAPIVersion) +func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) { + typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion) if err != nil { return nil, fmt.Errorf("get remote runtime typed version failed: %v", err) } @@ -302,8 +300,8 @@ func (m *kubeGenericRuntimeManager) getTypedVersion(ctx context.Context) (*runti } // Version returns the version information of the container runtime. -func (m *kubeGenericRuntimeManager) Version(ctx context.Context) (kubecontainer.Version, error) { - typedVersion, err := m.getTypedVersion(ctx) +func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) { + typedVersion, err := m.getTypedVersion() if err != nil { return nil, err } @@ -326,8 +324,8 @@ func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error) // Status returns the status of the runtime. An error is returned if the Status // function itself fails, nil otherwise. -func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer.RuntimeStatus, error) { - resp, err := m.runtimeService.Status(ctx, false) +func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, error) { + resp, err := m.runtimeService.Status(false) if err != nil { return nil, err } @@ -340,9 +338,9 @@ func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer. // GetPods returns a list of containers grouped by pods. The boolean parameter // specifies whether the runtime returns all containers including those already // exited and dead containers (used for garbage collection). 
-func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) { +func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, error) { pods := make(map[kubetypes.UID]*kubecontainer.Pod) - sandboxes, err := m.getKubeletSandboxes(ctx, all) + sandboxes, err := m.getKubeletSandboxes(all) if err != nil { return nil, err } @@ -370,7 +368,7 @@ func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*k p.CreatedAt = uint64(s.GetCreatedAt()) } - containers, err := m.getKubeletContainers(ctx, all) + containers, err := m.getKubeletContainers(all) if err != nil { return nil, err } @@ -671,7 +669,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *ku // 5. Create ephemeral containers. // 6. Create init containers. // 7. Create normal containers. -func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { +func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { // Step 1: Compute sandbox and container changes. podContainerChanges := m.computePodActions(pod, podStatus) klog.V(3).InfoS("computePodActions got for pod", "podActions", podContainerChanges, "pod", klog.KObj(pod)) @@ -695,7 +693,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po klog.V(4).InfoS("Stopping PodSandbox for pod, because all other containers are dead", "pod", klog.KObj(pod)) } - killResult := m.killPodWithSyncResult(ctx, pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil) + killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil) result.AddPodSyncResult(killResult) if killResult.Error() != nil { klog.ErrorS(killResult.Error(), "killPodWithSyncResult failed") @@ -703,7 +701,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po } if podContainerChanges.CreateSandbox { - m.purgeInitContainers(ctx, pod, podStatus) + m.purgeInitContainers(pod, podStatus) } } else { // Step 3: kill any running containers in this pod which are not to keep. @@ -711,7 +709,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po klog.V(3).InfoS("Killing unwanted container for pod", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod)) killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name) result.AddSyncResult(killContainerResult) - if err := m.killContainer(ctx, pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil); err != nil { + if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, containerInfo.reason, nil); err != nil { killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error()) klog.ErrorS(err, "killContainer for pod failed", "containerName", containerInfo.name, "containerID", containerID, "pod", klog.KObj(pod)) return @@ -722,7 +720,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po // Keep terminated init containers fairly aggressively controlled // This is an optimization because container removals are typically handled // by container garbage collector. 
- m.pruneInitContainersBeforeStart(ctx, pod, podStatus) + m.pruneInitContainersBeforeStart(pod, podStatus) // We pass the value of the PRIMARY podIP and list of podIPs down to // generatePodSandboxConfig and generateContainerConfig, which in turn @@ -760,7 +758,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po // When runc supports slash as sysctl separator, this function can no longer be used. sysctl.ConvertPodSysctlsVariableToDotsSeparator(pod.Spec.SecurityContext) - podSandboxID, msg, err = m.createPodSandbox(ctx, pod, podContainerChanges.Attempt) + podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt) if err != nil { // createPodSandbox can return an error from CNI, CSI, // or CRI if the Pod has been deleted while the POD is @@ -785,7 +783,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po } klog.V(4).InfoS("Created PodSandbox for pod", "podSandboxID", podSandboxID, "pod", klog.KObj(pod)) - resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false) + resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false) if err != nil { ref, referr := ref.GetReference(legacyscheme.Scheme, pod) if referr != nil { @@ -834,7 +832,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po // currently: "container", "init container" or "ephemeral container" // metricLabel is the label used to describe this type of container in monitoring metrics. // currently: "container", "init_container" or "ephemeral_container" - start := func(ctx context.Context, typeName, metricLabel string, spec *startSpec) error { + start := func(typeName, metricLabel string, spec *startSpec) error { startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, spec.container.Name) result.AddSyncResult(startContainerResult) @@ -851,7 +849,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po } klog.V(4).InfoS("Creating container in pod", "containerType", typeName, "container", spec.container, "pod", klog.KObj(pod)) // NOTE (aramase) podIPs are populated for single stack and dual stack clusters. Send only podIPs. - if msg, err := m.startContainer(ctx, podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil { + if msg, err := m.startContainer(podSandboxID, podSandboxConfig, spec, pod, podStatus, pullSecrets, podIP, podIPs); err != nil { // startContainer() returns well-defined error codes that have reasonable cardinality for metrics and are // useful to cluster administrators to distinguish "server errors" from "user errors". metrics.StartedContainersErrorsTotal.WithLabelValues(metricLabel, err.Error()).Inc() @@ -878,13 +876,13 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po // are errors starting an init container. In practice init containers will start first since ephemeral // containers cannot be specified on pod creation. for _, idx := range podContainerChanges.EphemeralContainersToStart { - start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx])) + start("ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx])) } // Step 6: start the init container. if container := podContainerChanges.NextInitContainerToStart; container != nil { // Start the next init container. 
- if err := start(ctx, "init container", metrics.InitContainer, containerStartSpec(container)); err != nil { + if err := start("init container", metrics.InitContainer, containerStartSpec(container)); err != nil { return } @@ -894,7 +892,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po // Step 7: start containers in podContainerChanges.ContainersToStart. for _, idx := range podContainerChanges.ContainersToStart { - start(ctx, "container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx])) + start("container", metrics.Container, containerStartSpec(&pod.Spec.Containers[idx])) } return @@ -937,15 +935,15 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Contain // gracePeriodOverride if specified allows the caller to override the pod default grace period. // only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data. // it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios. -func (m *kubeGenericRuntimeManager) KillPod(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { - err := m.killPodWithSyncResult(ctx, pod, runningPod, gracePeriodOverride) +func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error { + err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride) return err.Error() } // killPodWithSyncResult kills a runningPod and returns SyncResult. // Note: The pod passed in could be *nil* when kubelet restarted. -func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { - killContainerResults := m.killContainersWithSyncResult(ctx, pod, runningPod, gracePeriodOverride) +func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) { + killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride) for _, containerResult := range killContainerResults { result.AddSyncResult(containerResult) } @@ -955,7 +953,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, p result.AddSyncResult(killSandboxResult) // Stop all sandboxes belongs to same pod for _, podSandbox := range runningPod.Sandboxes { - if err := m.runtimeService.StopPodSandbox(ctx, podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) { + if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil && !crierror.IsNotFound(err) { killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error()) klog.ErrorS(nil, "Failed to stop sandbox", "podSandboxID", podSandbox.ID) } @@ -966,7 +964,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(ctx context.Context, p // GetPodStatus retrieves the status of the pod, including the // information of all containers in the pod that are visible in Runtime. -func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) { +func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) { // Now we retain restart count of container as a container label. 
Each time a container // restarts, pod will read the restart count from the registered dead container, increment // it to get the new restart count, and then add a label with the new restart count on @@ -980,7 +978,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety // Anyhow, we only promised "best-effort" restart count reporting, we can just ignore // these limitations now. // TODO: move this comment to SyncPod. - podSandboxIDs, err := m.getSandboxIDByPodUID(ctx, uid, nil) + podSandboxIDs, err := m.getSandboxIDByPodUID(uid, nil) if err != nil { return nil, err } @@ -1000,7 +998,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety sandboxStatuses := []*runtimeapi.PodSandboxStatus{} podIPs := []string{} for idx, podSandboxID := range podSandboxIDs { - resp, err := m.runtimeService.PodSandboxStatus(ctx, podSandboxID, false) + resp, err := m.runtimeService.PodSandboxStatus(podSandboxID, false) // Between List (getSandboxIDByPodUID) and check (PodSandboxStatus) another thread might remove a container, and that is normal. // The previous call (getSandboxIDByPodUID) never fails due to a pod sandbox not existing. // Therefore, this method should not either, but instead act as if the previous call failed, @@ -1024,7 +1022,7 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety } // Get statuses of all containers visible in the pod. - containerStatuses, err := m.getPodContainerStatuses(ctx, uid, name, namespace) + containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace) if err != nil { if m.logReduction.ShouldMessageBePrinted(err.Error(), podFullName) { klog.ErrorS(err, "getPodContainerStatuses for pod failed", "pod", klog.KObj(pod)) @@ -1044,17 +1042,17 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(ctx context.Context, uid kubety } // GarbageCollect removes dead containers using the specified container gc policy. -func (m *kubeGenericRuntimeManager) GarbageCollect(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { - return m.containerGC.GarbageCollect(ctx, gcPolicy, allSourcesReady, evictNonDeletedPods) +func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error { + return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods) } // UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim // with the podCIDR supplied by the kubelet. -func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR string) error { +func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error { // TODO(#35531): do we really want to write a method on this manager for each // field of the config? 
klog.InfoS("Updating runtime config through cri with podcidr", "CIDR", podCIDR) - return m.runtimeService.UpdateRuntimeConfig(ctx, + return m.runtimeService.UpdateRuntimeConfig( &runtimeapi.RuntimeConfig{ NetworkConfig: &runtimeapi.NetworkConfig{ PodCidr: podCIDR, @@ -1062,6 +1060,6 @@ func (m *kubeGenericRuntimeManager) UpdatePodCIDR(ctx context.Context, podCIDR s }) } -func (m *kubeGenericRuntimeManager) CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error { - return m.runtimeService.CheckpointContainer(ctx, options) +func (m *kubeGenericRuntimeManager) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { + return m.runtimeService.CheckpointContainer(options) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go index 3dc339e253b..5343d80dd41 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "fmt" "path/filepath" "reflect" @@ -162,11 +161,10 @@ func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates // makeFakeContainer creates a fake container based on a container template. func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer { - ctx := context.Background() sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt) assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template) - containerConfig, _, err := m.generateContainerConfig(ctx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil) + containerConfig, _, err := m.generateContainerConfig(template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil) assert.NoError(t, err, "generateContainerConfig for container template %+v", template) podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata) @@ -283,11 +281,10 @@ func TestNewKubeRuntimeManager(t *testing.T) { } func TestVersion(t *testing.T) { - ctx := context.Background() _, _, m, err := createTestRuntimeManager() assert.NoError(t, err) - version, err := m.Version(ctx) + version, err := m.Version() assert.NoError(t, err) assert.Equal(t, kubeRuntimeAPIVersion, version.String()) } @@ -301,7 +298,6 @@ func TestContainerRuntimeType(t *testing.T) { } func TestGetPodStatus(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -331,7 +327,7 @@ func TestGetPodStatus(t *testing.T) { // Set fake sandbox and faked containers to fakeRuntime. makeAndSetFakePod(t, m, fakeRuntime, pod) - podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) assert.Equal(t, pod.UID, podStatus.ID) assert.Equal(t, pod.Name, podStatus.Name) @@ -340,7 +336,6 @@ func TestGetPodStatus(t *testing.T) { } func TestStopContainerWithNotFoundError(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -370,16 +365,15 @@ func TestStopContainerWithNotFoundError(t *testing.T) { // Set fake sandbox and faked containers to fakeRuntime. 
makeAndSetFakePod(t, m, fakeRuntime, pod) fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container")) - podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) require.NoError(t, err) p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus) gracePeriod := int64(1) - err = m.KillPod(ctx, pod, p, &gracePeriod) + err = m.KillPod(pod, p, &gracePeriod) require.NoError(t, err) } func TestGetPodStatusWithNotFoundError(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -409,7 +403,7 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) { // Set fake sandbox and faked containers to fakeRuntime. makeAndSetFakePod(t, m, fakeRuntime, pod) fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container")) - podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) require.NoError(t, err) require.Equal(t, pod.UID, podStatus.ID) require.Equal(t, pod.Name, podStatus.Name) @@ -418,7 +412,6 @@ func TestGetPodStatusWithNotFoundError(t *testing.T) { } func TestGetPods(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -487,7 +480,7 @@ func TestGetPods(t *testing.T) { }, } - actual, err := m.GetPods(ctx, false) + actual, err := m.GetPods(false) assert.NoError(t, err) if !verifyPods(expected, actual) { @@ -496,7 +489,6 @@ func TestGetPods(t *testing.T) { } func TestGetPodsSorted(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -514,7 +506,7 @@ func TestGetPodsSorted(t *testing.T) { } fakeRuntime.SetFakeSandboxes(fakeSandboxes) - actual, err := m.GetPods(ctx, false) + actual, err := m.GetPods(false) assert.NoError(t, err) assert.Len(t, actual, 3) @@ -526,7 +518,6 @@ func TestGetPodsSorted(t *testing.T) { } func TestKillPod(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -593,7 +584,7 @@ func TestKillPod(t *testing.T) { }, } - err = m.KillPod(ctx, pod, runningPod, nil) + err = m.KillPod(pod, runningPod, nil) assert.NoError(t, err) assert.Equal(t, 3, len(fakeRuntime.Containers)) assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) @@ -633,7 +624,7 @@ func TestSyncPod(t *testing.T) { } backOff := flowcontrol.NewBackOff(time.Second, time.Minute) - result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) + result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) assert.Equal(t, 2, len(fakeRuntime.Containers)) assert.Equal(t, 2, len(fakeImage.Images)) @@ -693,7 +684,7 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) { } backOff := flowcontrol.NewBackOff(time.Second, time.Minute) - result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) + result := m.SyncPod(pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) assert.Equal(t, exceptSysctls, pod.Spec.SecurityContext.Sysctls) for _, sandbox := range fakeRuntime.Sandboxes { @@ -705,7 +696,6 @@ func TestSyncPodWithConvertedPodSysctls(t *testing.T) { } func TestPruneInitContainers(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := 
createTestRuntimeManager() assert.NoError(t, err) @@ -732,10 +722,10 @@ func TestPruneInitContainers(t *testing.T) { } fakes := makeFakeContainers(t, m, templates) fakeRuntime.SetFakeContainers(fakes) - podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - m.pruneInitContainersBeforeStart(ctx, pod, podStatus) + m.pruneInitContainersBeforeStart(pod, podStatus) expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id) if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok { t.Errorf("expected %v, got %v", expectedContainers, actual) @@ -743,7 +733,6 @@ func TestPruneInitContainers(t *testing.T) { } func TestSyncPodWithInitContainers(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) @@ -781,9 +770,9 @@ func TestSyncPodWithInitContainers(t *testing.T) { backOff := flowcontrol.NewBackOff(time.Second, time.Minute) // 1. should only create the init container. - podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff) + result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) expected := []*cRecord{ {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, @@ -791,24 +780,24 @@ func TestSyncPodWithInitContainers(t *testing.T) { verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container") // 2. should not create app container because init container is still running. - podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result = m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff) + result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing") // 3. should create all app containers because init container finished. // Stop init container instance 0. - sandboxIDs, err := m.getSandboxIDByPodUID(ctx, pod.UID, nil) + sandboxIDs, err := m.getSandboxIDByPodUID(pod.UID, nil) require.NoError(t, err) sandboxID := sandboxIDs[0] initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0) require.NoError(t, err) - fakeRuntime.StopContainer(ctx, initID0, 0) + fakeRuntime.StopContainer(initID0, 0) // Sync again. - podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff) + result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) expected = []*cRecord{ {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, @@ -819,11 +808,11 @@ func TestSyncPodWithInitContainers(t *testing.T) { // 4. should restart the init container if needed to create a new podsandbox // Stop the pod sandbox. - fakeRuntime.StopPodSandbox(ctx, sandboxID) + fakeRuntime.StopPodSandbox(sandboxID) // Sync again. 
- podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff) + result = m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) assert.NoError(t, result.Error()) expected = []*cRecord{ // The first init container instance is purged and no longer visible. @@ -1552,7 +1541,6 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) { } func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() assert.NoError(t, err) fakeRuntime.ErrorOnSandboxCreate = true @@ -1581,9 +1569,9 @@ func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) { // GetPodStatus and the following SyncPod will not return errors in the // case where the pod has been deleted. We are not adding any pods into // the fakePodProvider so they are 'deleted'. - podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace) assert.NoError(t, err) - result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff) + result := m.SyncPod(pod, podStatus, []v1.Secret{}, backOff) // This will return an error if the pod has _not_ been deleted. assert.NoError(t, result.Error()) } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go index 5e312a63563..ee209f53ef1 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "fmt" "net/url" "runtime" @@ -36,7 +35,7 @@ import ( ) // createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error). -func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v1.Pod, attempt uint32) (string, string, error) { +func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) { podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt) if err != nil { message := fmt.Sprintf("Failed to generate sandbox config for pod %q: %v", format.Pod(pod), err) @@ -64,7 +63,7 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(ctx context.Context, pod *v } } - podSandBoxID, err := m.runtimeService.RunPodSandbox(ctx, podSandboxConfig, runtimeHandler) + podSandBoxID, err := m.runtimeService.RunPodSandbox(podSandboxConfig, runtimeHandler) if err != nil { message := fmt.Sprintf("Failed to create sandbox for pod %q: %v", format.Pod(pod), err) klog.ErrorS(err, "Failed to create sandbox for pod", "pod", klog.KObj(pod)) @@ -273,7 +272,7 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxWindowsConfig(pod *v1.Pod) } // getKubeletSandboxes lists all (or just the running) sandboxes managed by kubelet. 
-func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all bool) ([]*runtimeapi.PodSandbox, error) { +func (m *kubeGenericRuntimeManager) getKubeletSandboxes(all bool) ([]*runtimeapi.PodSandbox, error) { var filter *runtimeapi.PodSandboxFilter if !all { readyState := runtimeapi.PodSandboxState_SANDBOX_READY @@ -284,7 +283,7 @@ func (m *kubeGenericRuntimeManager) getKubeletSandboxes(ctx context.Context, all } } - resp, err := m.runtimeService.ListPodSandbox(ctx, filter) + resp, err := m.runtimeService.ListPodSandbox(filter) if err != nil { klog.ErrorS(err, "Failed to list pod sandboxes") return nil, err @@ -327,7 +326,7 @@ func (m *kubeGenericRuntimeManager) determinePodSandboxIPs(podNamespace, podName // getPodSandboxID gets the sandbox id by podUID and returns ([]sandboxID, error). // Param state could be nil in order to get all sandboxes belonging to same pod. -func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) { +func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(podUID kubetypes.UID, state *runtimeapi.PodSandboxState) ([]string, error) { filter := &runtimeapi.PodSandboxFilter{ LabelSelector: map[string]string{types.KubernetesPodUIDLabel: string(podUID)}, } @@ -336,7 +335,7 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, po State: *state, } } - sandboxes, err := m.runtimeService.ListPodSandbox(ctx, filter) + sandboxes, err := m.runtimeService.ListPodSandbox(filter) if err != nil { klog.ErrorS(err, "Failed to list sandboxes for pod", "podUID", podUID) return nil, err @@ -357,8 +356,8 @@ func (m *kubeGenericRuntimeManager) getSandboxIDByPodUID(ctx context.Context, po } // GetPortForward gets the endpoint the runtime will serve the port-forward request from. -func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) { - sandboxIDs, err := m.getSandboxIDByPodUID(ctx, podUID, nil) +func (m *kubeGenericRuntimeManager) GetPortForward(podName, podNamespace string, podUID kubetypes.UID, ports []int32) (*url.URL, error) { + sandboxIDs, err := m.getSandboxIDByPodUID(podUID, nil) if err != nil { return nil, fmt.Errorf("failed to find sandboxID for pod %s: %v", format.PodDesc(podName, podNamespace, podUID), err) } @@ -369,7 +368,7 @@ func (m *kubeGenericRuntimeManager) GetPortForward(ctx context.Context, podName, PodSandboxId: sandboxIDs[0], Port: ports, } - resp, err := m.runtimeService.PortForward(ctx, req) + resp, err := m.runtimeService.PortForward(req) if err != nil { return nil, err } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go index d3b47063ebd..cd90a72bdfb 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_sandbox_test.go @@ -17,7 +17,6 @@ limitations under the License. package kuberuntime import ( - "context" "fmt" "os" "path/filepath" @@ -36,7 +35,6 @@ import ( // TestCreatePodSandbox tests creating sandbox and its corresponding pod log directory. 
func TestCreatePodSandbox(t *testing.T) { - ctx := context.Background() fakeRuntime, _, m, err := createTestRuntimeManager() require.NoError(t, err) pod := newTestPod() @@ -48,10 +46,10 @@ func TestCreatePodSandbox(t *testing.T) { assert.Equal(t, os.FileMode(0755), perm) return nil } - id, _, err := m.createPodSandbox(ctx, pod, 1) + id, _, err := m.createPodSandbox(pod, 1) assert.NoError(t, err) assert.Contains(t, fakeRuntime.Called, "RunPodSandbox") - sandboxes, err := fakeRuntime.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{Id: id}) + sandboxes, err := fakeRuntime.ListPodSandbox(&runtimeapi.PodSandboxFilter{Id: id}) assert.NoError(t, err) assert.Equal(t, len(sandboxes), 1) // TODO Check pod sandbox configuration @@ -102,7 +100,6 @@ func TestGeneratePodSandboxLinuxConfigSeccomp(t *testing.T) { // TestCreatePodSandbox_RuntimeClass tests creating sandbox with RuntimeClasses enabled. func TestCreatePodSandbox_RuntimeClass(t *testing.T) { - ctx := context.Background() rcm := runtimeclass.NewManager(rctest.NewPopulatedClient()) defer rctest.StartManagerSync(rcm)() @@ -125,7 +122,7 @@ func TestCreatePodSandbox_RuntimeClass(t *testing.T) { pod := newTestPod() pod.Spec.RuntimeClassName = test.rcn - id, _, err := m.createPodSandbox(ctx, pod, 1) + id, _, err := m.createPodSandbox(pod, 1) if test.expectError { assert.Error(t, err) } else { diff --git a/pkg/kubelet/kuberuntime/logs/logs.go b/pkg/kubelet/kuberuntime/logs/logs.go index a2dedb08f80..fc9a46665ee 100644 --- a/pkg/kubelet/kuberuntime/logs/logs.go +++ b/pkg/kubelet/kuberuntime/logs/logs.go @@ -419,8 +419,8 @@ func ReadLogs(ctx context.Context, path, containerID string, opts *LogOptions, r } } -func isContainerRunning(ctx context.Context, id string, r internalapi.RuntimeService) (bool, error) { - resp, err := r.ContainerStatus(ctx, id, false) +func isContainerRunning(id string, r internalapi.RuntimeService) (bool, error) { + resp, err := r.ContainerStatus(id, false) if err != nil { return false, err } @@ -443,7 +443,7 @@ func isContainerRunning(ctx context.Context, id string, r internalapi.RuntimeSer // the error is error happens during waiting new logs. func waitLogs(ctx context.Context, id string, w *fsnotify.Watcher, runtimeService internalapi.RuntimeService) (bool, bool, error) { // no need to wait if the pod is not running - if running, err := isContainerRunning(ctx, id, runtimeService); !running { + if running, err := isContainerRunning(id, runtimeService); !running { return false, false, err } errRetry := 5 diff --git a/pkg/kubelet/lifecycle/handlers.go b/pkg/kubelet/lifecycle/handlers.go index 910c7a42edc..66bb834d3b3 100644 --- a/pkg/kubelet/lifecycle/handlers.go +++ b/pkg/kubelet/lifecycle/handlers.go @@ -54,7 +54,7 @@ type handlerRunner struct { } type podStatusProvider interface { - GetPodStatus(ctx context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) + GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) } // NewHandlerRunner returns a configured lifecycle handler for a container. 
@@ -67,19 +67,19 @@ func NewHandlerRunner(httpDoer kubetypes.HTTPDoer, commandRunner kubecontainer.C } } -func (hr *handlerRunner) Run(ctx context.Context, containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) { +func (hr *handlerRunner) Run(containerID kubecontainer.ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler) (string, error) { switch { case handler.Exec != nil: var msg string // TODO(tallclair): Pass a proper timeout value. - output, err := hr.commandRunner.RunInContainer(ctx, containerID, handler.Exec.Command, 0) + output, err := hr.commandRunner.RunInContainer(containerID, handler.Exec.Command, 0) if err != nil { msg = fmt.Sprintf("Exec lifecycle hook (%v) for Container %q in Pod %q failed - error: %v, message: %q", handler.Exec.Command, container.Name, format.Pod(pod), err, string(output)) klog.V(1).ErrorS(err, "Exec lifecycle hook for Container in Pod failed", "execCommand", handler.Exec.Command, "containerName", container.Name, "pod", klog.KObj(pod), "message", string(output)) } return msg, err case handler.HTTPGet != nil: - err := hr.runHTTPHandler(ctx, pod, container, handler, hr.eventRecorder) + err := hr.runHTTPHandler(pod, container, handler, hr.eventRecorder) var msg string if err != nil { msg = fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", handler.HTTPGet.Path, container.Name, format.Pod(pod), err) @@ -117,11 +117,11 @@ func resolvePort(portReference intstr.IntOrString, container *v1.Container) (int return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container) } -func (hr *handlerRunner) runHTTPHandler(ctx context.Context, pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error { +func (hr *handlerRunner) runHTTPHandler(pod *v1.Pod, container *v1.Container, handler *v1.LifecycleHandler, eventRecorder record.EventRecorder) error { host := handler.HTTPGet.Host podIP := host if len(host) == 0 { - status, err := hr.containerManager.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + status, err := hr.containerManager.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { klog.ErrorS(err, "Unable to get pod info, event handlers may be invalid.", "pod", klog.KObj(pod)) return err diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go index 962962dd95b..5d017b89df3 100644 --- a/pkg/kubelet/lifecycle/handlers_test.go +++ b/pkg/kubelet/lifecycle/handlers_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package lifecycle import ( - "context" "fmt" "io" "net" @@ -95,7 +94,7 @@ type fakeContainerCommandRunner struct { Msg string } -func (f *fakeContainerCommandRunner) RunInContainer(_ context.Context, id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { +func (f *fakeContainerCommandRunner) RunInContainer(id kubecontainer.ContainerID, cmd []string, timeout time.Duration) ([]byte, error) { f.Cmd = cmd f.ID = id return []byte(f.Msg), f.Err @@ -114,12 +113,11 @@ func stubPodStatusProvider(podIP string) podStatusProvider { type podStatusProviderFunc func(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) -func (f podStatusProviderFunc) GetPodStatus(_ context.Context, uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { +func (f podStatusProviderFunc) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) { return f(uid, name, namespace) } func TestRunHandlerExec(t *testing.T) { - ctx := context.Background() fakeCommandRunner := fakeContainerCommandRunner{} handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil) @@ -141,7 +139,7 @@ func TestRunHandlerExec(t *testing.T) { pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" pod.Spec.Containers = []v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) } @@ -165,7 +163,6 @@ func (f *fakeHTTP) Do(req *http.Request) (*http.Response, error) { } func TestRunHandlerHttp(t *testing.T) { - ctx := context.Background() fakeHTTPGetter := fakeHTTP{} fakePodStatusProvider := stubPodStatusProvider("127.0.0.1") handlerRunner := NewHandlerRunner(&fakeHTTPGetter, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) @@ -190,7 +187,7 @@ func TestRunHandlerHttp(t *testing.T) { pod.ObjectMeta.Namespace = "nsFoo" pod.ObjectMeta.UID = "foo-bar-quux" pod.Spec.Containers = []v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) @@ -201,7 +198,6 @@ func TestRunHandlerHttp(t *testing.T) { } func TestRunHandlerHttpWithHeaders(t *testing.T) { - ctx := context.Background() fakeHTTPDoer := fakeHTTP{} fakePodStatusProvider := stubPodStatusProvider("127.0.0.1") @@ -229,7 +225,7 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) { pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" pod.Spec.Containers = []v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) @@ -243,7 +239,6 @@ func TestRunHandlerHttpWithHeaders(t *testing.T) { } func TestRunHandlerHttps(t *testing.T) { - ctx := context.Background() fakeHTTPDoer := fakeHTTP{} fakePodStatusProvider := stubPodStatusProvider("127.0.0.1") handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) @@ -271,7 +266,7 @@ func TestRunHandlerHttps(t *testing.T) { t.Run("consistent", func(t *testing.T) { container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70") pod.Spec.Containers = 
[]v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) @@ -285,7 +280,7 @@ func TestRunHandlerHttps(t *testing.T) { defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)() container.Lifecycle.PostStart.HTTPGet.Port = intstr.FromString("70") pod.Spec.Containers = []v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) @@ -352,14 +347,13 @@ func TestRunHandlerHTTPPort(t *testing.T) { for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { - ctx := context.Background() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, tt.FeatureGateEnabled)() fakeHTTPDoer := fakeHTTP{} handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) container.Lifecycle.PostStart.HTTPGet.Port = tt.Port pod.Spec.Containers = []v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if hasError := (err != nil); hasError != tt.ExpectError { t.Errorf("unexpected error: %v", err) @@ -624,7 +618,6 @@ func TestRunHTTPHandler(t *testing.T) { for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { - ctx := context.Background() fakePodStatusProvider := stubPodStatusProvider(tt.PodIP) container.Lifecycle.PostStart.HTTPGet = tt.HTTPGet @@ -634,7 +627,7 @@ func TestRunHTTPHandler(t *testing.T) { fakeHTTPDoer := fakeHTTP{} handlerRunner := NewHandlerRunner(&fakeHTTPDoer, &fakeContainerCommandRunner{}, fakePodStatusProvider, nil) - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Fatal(err) } @@ -661,7 +654,6 @@ func TestRunHTTPHandler(t *testing.T) { } func TestRunHandlerNil(t *testing.T) { - ctx := context.Background() handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeContainerCommandRunner{}, nil, nil) containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"} podName := "podFoo" @@ -678,14 +670,13 @@ func TestRunHandlerNil(t *testing.T) { pod.ObjectMeta.Name = podName pod.ObjectMeta.Namespace = podNamespace pod.Spec.Containers = []v1.Container{container} - _, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + _, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err == nil { t.Errorf("expect error, but got nil") } } func TestRunHandlerExecFailure(t *testing.T) { - ctx := context.Background() expectedErr := fmt.Errorf("invalid command") fakeCommandRunner := fakeContainerCommandRunner{Err: expectedErr, Msg: expectedErr.Error()} handlerRunner := NewHandlerRunner(&fakeHTTP{}, &fakeCommandRunner, nil, nil) @@ -710,7 +701,7 @@ func TestRunHandlerExecFailure(t *testing.T) { pod.ObjectMeta.Namespace = "nsFoo" pod.Spec.Containers = []v1.Container{container} expectedErrMsg := fmt.Sprintf("Exec lifecycle hook (%s) 
for Container %q in Pod %q failed - error: %v, message: %q", command, containerName, format.Pod(&pod), expectedErr, expectedErr.Error()) - msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err == nil { t.Errorf("expected error: %v", expectedErr) } @@ -720,7 +711,6 @@ func TestRunHandlerExecFailure(t *testing.T) { } func TestRunHandlerHttpFailure(t *testing.T) { - ctx := context.Background() expectedErr := fmt.Errorf("fake http error") expectedResp := http.Response{ Body: io.NopCloser(strings.NewReader(expectedErr.Error())), @@ -750,7 +740,7 @@ func TestRunHandlerHttpFailure(t *testing.T) { pod.ObjectMeta.Namespace = "nsFoo" pod.Spec.Containers = []v1.Container{container} expectedErrMsg := fmt.Sprintf("HTTP lifecycle hook (%s) for Container %q in Pod %q failed - error: %v", "bar", containerName, format.Pod(&pod), expectedErr) - msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err == nil { t.Errorf("expected error: %v", expectedErr) } @@ -763,7 +753,6 @@ func TestRunHandlerHttpFailure(t *testing.T) { } func TestRunHandlerHttpsFailureFallback(t *testing.T) { - ctx := context.Background() defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, true)() // Since prometheus' gatherer is global, other tests may have updated metrics already, so @@ -814,7 +803,7 @@ func TestRunHandlerHttpsFailureFallback(t *testing.T) { pod.ObjectMeta.Name = "podFoo" pod.ObjectMeta.Namespace = "nsFoo" pod.Spec.Containers = []v1.Container{container} - msg, err := handlerRunner.Run(ctx, containerID, &pod, &container, container.Lifecycle.PostStart) + msg, err := handlerRunner.Run(containerID, &pod, &container, container.Lifecycle.PostStart) if err != nil { t.Errorf("unexpected error: %v", err) diff --git a/pkg/kubelet/logs/container_log_manager.go b/pkg/kubelet/logs/container_log_manager.go index b3d6b063160..a5247607ea3 100644 --- a/pkg/kubelet/logs/container_log_manager.go +++ b/pkg/kubelet/logs/container_log_manager.go @@ -18,7 +18,6 @@ package logs import ( "compress/gzip" - "context" "fmt" "io" "os" @@ -59,7 +58,7 @@ type ContainerLogManager interface { // Start container log manager. Start() // Clean removes all logs of specified container. - Clean(ctx context.Context, containerID string) error + Clean(containerID string) error } // LogRotatePolicy is a policy for container log rotation. The policy applies to all @@ -178,20 +177,19 @@ func NewContainerLogManager(runtimeService internalapi.RuntimeService, osInterfa // Start the container log manager. func (c *containerLogManager) Start() { - ctx := context.Background() // Start a goroutine periodically does container log rotation. go wait.Forever(func() { - if err := c.rotateLogs(ctx); err != nil { + if err := c.rotateLogs(); err != nil { klog.ErrorS(err, "Failed to rotate container logs") } }, logMonitorPeriod) } // Clean removes all logs of specified container (including rotated one). 
-func (c *containerLogManager) Clean(ctx context.Context, containerID string) error { +func (c *containerLogManager) Clean(containerID string) error { c.mutex.Lock() defer c.mutex.Unlock() - resp, err := c.runtimeService.ContainerStatus(ctx, containerID, false) + resp, err := c.runtimeService.ContainerStatus(containerID, false) if err != nil { return fmt.Errorf("failed to get container status %q: %v", containerID, err) } @@ -213,11 +211,11 @@ func (c *containerLogManager) Clean(ctx context.Context, containerID string) err return nil } -func (c *containerLogManager) rotateLogs(ctx context.Context) error { +func (c *containerLogManager) rotateLogs() error { c.mutex.Lock() defer c.mutex.Unlock() // TODO(#59998): Use kubelet pod cache. - containers, err := c.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{}) + containers, err := c.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) if err != nil { return fmt.Errorf("failed to list containers: %v", err) } @@ -230,7 +228,7 @@ func (c *containerLogManager) rotateLogs(ctx context.Context) error { } id := container.GetId() // Note that we should not block log rotate for an error of a single container. - resp, err := c.runtimeService.ContainerStatus(ctx, id, false) + resp, err := c.runtimeService.ContainerStatus(id, false) if err != nil { klog.ErrorS(err, "Failed to get container status", "containerID", id) continue @@ -249,7 +247,7 @@ func (c *containerLogManager) rotateLogs(ctx context.Context) error { // In rotateLatestLog, there are several cases that we may // lose original container log after ReopenContainerLog fails. // We try to recover it by reopening container log. - if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil { + if err := c.runtimeService.ReopenContainerLog(id); err != nil { klog.ErrorS(err, "Container log doesn't exist, reopen container log failed", "containerID", id, "path", path) continue } @@ -264,7 +262,7 @@ func (c *containerLogManager) rotateLogs(ctx context.Context) error { continue } // Perform log rotation. - if err := c.rotateLog(ctx, id, path); err != nil { + if err := c.rotateLog(id, path); err != nil { klog.ErrorS(err, "Failed to rotate log for container", "path", path, "containerID", id) continue } @@ -272,7 +270,7 @@ func (c *containerLogManager) rotateLogs(ctx context.Context) error { return nil } -func (c *containerLogManager) rotateLog(ctx context.Context, id, log string) error { +func (c *containerLogManager) rotateLog(id, log string) error { // pattern is used to match all rotated files. pattern := fmt.Sprintf("%s.*", log) logs, err := filepath.Glob(pattern) @@ -300,7 +298,7 @@ func (c *containerLogManager) rotateLog(ctx context.Context, id, log string) err } } - if err := c.rotateLatestLog(ctx, id, log); err != nil { + if err := c.rotateLatestLog(id, log); err != nil { return fmt.Errorf("failed to rotate log %q: %v", log, err) } @@ -412,13 +410,13 @@ func (c *containerLogManager) compressLog(log string) error { // rotateLatestLog rotates latest log without compression, so that container can still write // and fluentd can finish reading. 
-func (c *containerLogManager) rotateLatestLog(ctx context.Context, id, log string) error { +func (c *containerLogManager) rotateLatestLog(id, log string) error { timestamp := c.clock.Now().Format(timestampFormat) rotated := fmt.Sprintf("%s.%s", log, timestamp) if err := c.osInterface.Rename(log, rotated); err != nil { return fmt.Errorf("failed to rotate log %q to %q: %v", log, rotated, err) } - if err := c.runtimeService.ReopenContainerLog(ctx, id); err != nil { + if err := c.runtimeService.ReopenContainerLog(id); err != nil { // Rename the rotated log back, so that we can try rotating it again // next round. // If kubelet gets restarted at this point, we'll lose original log. diff --git a/pkg/kubelet/logs/container_log_manager_stub.go b/pkg/kubelet/logs/container_log_manager_stub.go index f0a2ef9fdf2..27db1e42cbf 100644 --- a/pkg/kubelet/logs/container_log_manager_stub.go +++ b/pkg/kubelet/logs/container_log_manager_stub.go @@ -16,13 +16,11 @@ limitations under the License. package logs -import "context" - type containerLogManagerStub struct{} func (*containerLogManagerStub) Start() {} -func (*containerLogManagerStub) Clean(ctx context.Context, containerID string) error { +func (*containerLogManagerStub) Clean(containerID string) error { return nil } diff --git a/pkg/kubelet/logs/container_log_manager_test.go b/pkg/kubelet/logs/container_log_manager_test.go index 59fc3da073c..6478088494c 100644 --- a/pkg/kubelet/logs/container_log_manager_test.go +++ b/pkg/kubelet/logs/container_log_manager_test.go @@ -18,7 +18,6 @@ package logs import ( "bytes" - "context" "fmt" "io" "os" @@ -75,7 +74,6 @@ func TestGetAllLogs(t *testing.T) { } func TestRotateLogs(t *testing.T) { - ctx := context.Background() dir, err := os.MkdirTemp("", "test-rotate-logs") require.NoError(t, err) defer os.RemoveAll(dir) @@ -149,7 +147,7 @@ func TestRotateLogs(t *testing.T) { }, } f.SetFakeContainers(testContainers) - require.NoError(t, c.rotateLogs(ctx)) + require.NoError(t, c.rotateLogs()) timestamp := now.Format(timestampFormat) logs, err := os.ReadDir(dir) @@ -163,7 +161,6 @@ func TestRotateLogs(t *testing.T) { } func TestClean(t *testing.T) { - ctx := context.Background() dir, err := os.MkdirTemp("", "test-clean") require.NoError(t, err) defer os.RemoveAll(dir) @@ -222,7 +219,7 @@ func TestClean(t *testing.T) { } f.SetFakeContainers(testContainers) - err = c.Clean(ctx, "container-3") + err = c.Clean("container-3") require.NoError(t, err) logs, err := os.ReadDir(dir) @@ -353,7 +350,6 @@ func TestCompressLog(t *testing.T) { } func TestRotateLatestLog(t *testing.T) { - ctx := context.Background() dir, err := os.MkdirTemp("", "test-rotate-latest-log") require.NoError(t, err) defer os.RemoveAll(dir) @@ -397,7 +393,7 @@ func TestRotateLatestLog(t *testing.T) { defer testFile.Close() testLog := testFile.Name() rotatedLog := fmt.Sprintf("%s.%s", testLog, now.Format(timestampFormat)) - err = c.rotateLatestLog(ctx, "test-id", testLog) + err = c.rotateLatestLog("test-id", testLog) assert.Equal(t, test.expectError, err != nil) _, err = os.Stat(testLog) assert.Equal(t, test.expectOriginal, err == nil) diff --git a/pkg/kubelet/metrics/collectors/log_metrics.go b/pkg/kubelet/metrics/collectors/log_metrics.go index 4b2237fbbad..c542c5ba3da 100644 --- a/pkg/kubelet/metrics/collectors/log_metrics.go +++ b/pkg/kubelet/metrics/collectors/log_metrics.go @@ -17,8 +17,6 @@ limitations under the License. 
package collectors import ( - "context" - "k8s.io/component-base/metrics" "k8s.io/klog/v2" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" @@ -42,7 +40,7 @@ var ( type logMetricsCollector struct { metrics.BaseStableCollector - podStats func(ctx context.Context) ([]statsapi.PodStats, error) + podStats func() ([]statsapi.PodStats, error) } // Check if logMetricsCollector implements necessary interface @@ -50,7 +48,7 @@ var _ metrics.StableCollector = &logMetricsCollector{} // NewLogMetricsCollector implements the metrics.StableCollector interface and // exposes metrics about container's log volume size. -func NewLogMetricsCollector(podStats func(ctx context.Context) ([]statsapi.PodStats, error)) metrics.StableCollector { +func NewLogMetricsCollector(podStats func() ([]statsapi.PodStats, error)) metrics.StableCollector { return &logMetricsCollector{ podStats: podStats, } @@ -63,7 +61,7 @@ func (c *logMetricsCollector) DescribeWithStability(ch chan<- *metrics.Desc) { // CollectWithStability implements the metrics.StableCollector interface. func (c *logMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) { - podStats, err := c.podStats(context.Background()) + podStats, err := c.podStats() if err != nil { klog.ErrorS(err, "Failed to get pod stats") return diff --git a/pkg/kubelet/metrics/collectors/log_metrics_test.go b/pkg/kubelet/metrics/collectors/log_metrics_test.go index fa3c297233c..a4efce64076 100644 --- a/pkg/kubelet/metrics/collectors/log_metrics_test.go +++ b/pkg/kubelet/metrics/collectors/log_metrics_test.go @@ -17,7 +17,6 @@ limitations under the License. package collectors import ( - "context" "strings" "testing" @@ -30,7 +29,7 @@ func TestNoMetricsCollected(t *testing.T) { descLogSize = descLogSize.GetRawDesc() collector := &logMetricsCollector{ - podStats: func(_ context.Context) ([]statsapi.PodStats, error) { + podStats: func() ([]statsapi.PodStats, error) { return []statsapi.PodStats{}, nil }, } @@ -46,7 +45,7 @@ func TestMetricsCollected(t *testing.T) { size := uint64(18) collector := &logMetricsCollector{ - podStats: func(_ context.Context) ([]statsapi.PodStats, error) { + podStats: func() ([]statsapi.PodStats, error) { return []statsapi.PodStats{ { PodRef: statsapi.PodReference{ diff --git a/pkg/kubelet/metrics/collectors/resource_metrics.go b/pkg/kubelet/metrics/collectors/resource_metrics.go index ab6ae934073..b5a3c963401 100644 --- a/pkg/kubelet/metrics/collectors/resource_metrics.go +++ b/pkg/kubelet/metrics/collectors/resource_metrics.go @@ -17,7 +17,6 @@ limitations under the License. package collectors import ( - "context" "time" "k8s.io/component-base/metrics" @@ -117,12 +116,11 @@ func (rc *resourceMetricsCollector) DescribeWithStability(ch chan<- *metrics.Des // leak metric collectors for containers or pods that no longer exist. Instead, implement // custom collector in a way that only collects metrics for active containers. 
func (rc *resourceMetricsCollector) CollectWithStability(ch chan<- metrics.Metric) { - ctx := context.Background() var errorCount float64 defer func() { ch <- metrics.NewLazyConstMetric(resourceScrapeResultDesc, metrics.GaugeValue, errorCount) }() - statsSummary, err := rc.provider.GetCPUAndMemoryStats(ctx) + statsSummary, err := rc.provider.GetCPUAndMemoryStats() if err != nil { errorCount = 1 klog.ErrorS(err, "Error getting summary for resourceMetric prometheus endpoint") diff --git a/pkg/kubelet/metrics/collectors/resource_metrics_test.go b/pkg/kubelet/metrics/collectors/resource_metrics_test.go index 0f2c0a26b4f..6bdfad80c27 100644 --- a/pkg/kubelet/metrics/collectors/resource_metrics_test.go +++ b/pkg/kubelet/metrics/collectors/resource_metrics_test.go @@ -17,7 +17,6 @@ limitations under the License. package collectors import ( - "context" "fmt" "strings" "testing" @@ -358,9 +357,8 @@ func TestCollectResourceMetrics(t *testing.T) { for _, test := range tests { tc := test t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() provider := summaryprovidertest.NewMockSummaryProvider(mockCtrl) - provider.EXPECT().GetCPUAndMemoryStats(ctx).Return(tc.summary, tc.summaryErr).AnyTimes() + provider.EXPECT().GetCPUAndMemoryStats().Return(tc.summary, tc.summaryErr).AnyTimes() collector := NewResourceMetricsCollector(provider) if err := testutil.CustomCollectAndCompare(collector, strings.NewReader(tc.expectedMetrics), interestedMetrics...); err != nil { diff --git a/pkg/kubelet/metrics/collectors/volume_stats.go b/pkg/kubelet/metrics/collectors/volume_stats.go index ddcb308de01..b6bf1870c4c 100644 --- a/pkg/kubelet/metrics/collectors/volume_stats.go +++ b/pkg/kubelet/metrics/collectors/volume_stats.go @@ -17,8 +17,6 @@ limitations under the License. package collectors import ( - "context" - "k8s.io/apimachinery/pkg/util/sets" "k8s.io/component-base/metrics" stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1" @@ -98,8 +96,7 @@ func (collector *volumeStatsCollector) DescribeWithStability(ch chan<- *metrics. // CollectWithStability implements the metrics.StableCollector interface. func (collector *volumeStatsCollector) CollectWithStability(ch chan<- metrics.Metric) { - ctx := context.Background() - podStats, err := collector.statsProvider.ListPodStats(ctx) + podStats, err := collector.statsProvider.ListPodStats() if err != nil { return } diff --git a/pkg/kubelet/metrics/collectors/volume_stats_test.go b/pkg/kubelet/metrics/collectors/volume_stats_test.go index afabb07d27f..1d0e825a35b 100644 --- a/pkg/kubelet/metrics/collectors/volume_stats_test.go +++ b/pkg/kubelet/metrics/collectors/volume_stats_test.go @@ -17,7 +17,6 @@ limitations under the License. package collectors import ( - "context" "strings" "testing" @@ -33,7 +32,6 @@ func newUint64Pointer(i uint64) *uint64 { } func TestVolumeStatsCollector(t *testing.T) { - ctx := context.Background() // Fixed metadata on type and help text. We prepend this to every expected // output so we only have to modify a single place when doing adjustments. 
const metadata = ` @@ -146,15 +144,14 @@ func TestVolumeStatsCollector(t *testing.T) { defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) - mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes() - mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil).AnyTimes() + mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes() + mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil).AnyTimes() if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } } func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) { - ctx := context.Background() // Fixed metadata on type and help text. We prepend this to every expected // output so we only have to modify a single place when doing adjustments. const metadata = ` @@ -234,8 +231,8 @@ func TestVolumeStatsCollectorWithNullVolumeStatus(t *testing.T) { defer mockCtrl.Finish() mockStatsProvider := statstest.NewMockProvider(mockCtrl) - mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes() - mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil).AnyTimes() + mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes() + mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil).AnyTimes() if err := testutil.CustomCollectAndCompare(&volumeStatsCollector{statsProvider: mockStatsProvider}, strings.NewReader(want), metrics...); err != nil { t.Errorf("unexpected collecting result:\n%s", err) } diff --git a/pkg/kubelet/nodestatus/setters.go b/pkg/kubelet/nodestatus/setters.go index 687e8212403..8a2ecaa6248 100644 --- a/pkg/kubelet/nodestatus/setters.go +++ b/pkg/kubelet/nodestatus/setters.go @@ -17,7 +17,6 @@ limitations under the License. package nodestatus import ( - "context" "fmt" "math" "net" @@ -55,7 +54,7 @@ const ( // Setter modifies the node in-place, and returns an error if the modification failed. // Setters may partially mutate the node before returning an error. -type Setter func(ctx context.Context, node *v1.Node) error +type Setter func(node *v1.Node) error // NodeAddress returns a Setter that updates address-related information on the node. func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs @@ -79,7 +78,7 @@ func NodeAddress(nodeIPs []net.IP, // typically Kubelet.nodeIPs } secondaryNodeIPSpecified := secondaryNodeIP != nil && !secondaryNodeIP.IsUnspecified() - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { if nodeIPSpecified { if err := validateNodeIPFunc(nodeIP); err != nil { return fmt.Errorf("failed to validate nodeIP: %v", err) @@ -251,7 +250,7 @@ func MachineInfo(nodeName string, recordEventFunc func(eventType, event, message string), // typically Kubelet.recordEvent localStorageCapacityIsolation bool, ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { // Note: avoid blindly overwriting the capacity in case opaque // resources are being advertised. if node.Status.Capacity == nil { @@ -380,9 +379,9 @@ func MachineInfo(nodeName string, // VersionInfo returns a Setter that updates version-related information on the node. 
func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // typically Kubelet.cadvisor.VersionInfo runtimeTypeFunc func() string, // typically Kubelet.containerRuntime.Type - runtimeVersionFunc func(ctx context.Context) (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version + runtimeVersionFunc func() (kubecontainer.Version, error), // typically Kubelet.containerRuntime.Version ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { verinfo, err := versionInfoFunc() if err != nil { return fmt.Errorf("error getting version info: %v", err) @@ -392,7 +391,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // node.Status.NodeInfo.OSImage = verinfo.ContainerOsVersion runtimeVersion := "Unknown" - if runtimeVer, err := runtimeVersionFunc(ctx); err == nil { + if runtimeVer, err := runtimeVersionFunc(); err == nil { runtimeVersion = runtimeVer.String() } node.Status.NodeInfo.ContainerRuntimeVersion = fmt.Sprintf("%s://%s", runtimeTypeFunc(), runtimeVersion) @@ -406,7 +405,7 @@ func VersionInfo(versionInfoFunc func() (*cadvisorapiv1.VersionInfo, error), // // DaemonEndpoints returns a Setter that updates the daemon endpoints on the node. func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { node.Status.DaemonEndpoints = *daemonEndpoints return nil } @@ -418,7 +417,7 @@ func DaemonEndpoints(daemonEndpoints *v1.NodeDaemonEndpoints) Setter { func Images(nodeStatusMaxImages int32, imageListFunc func() ([]kubecontainer.Image, error), // typically Kubelet.imageManager.GetImageList ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { // Update image list of this node var imagesOnNode []v1.ContainerImage containerImages, err := imageListFunc() @@ -453,7 +452,7 @@ func Images(nodeStatusMaxImages int32, // GoRuntime returns a Setter that sets GOOS and GOARCH on the node. func GoRuntime() Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { node.Status.NodeInfo.OperatingSystem = goruntime.GOOS node.Status.NodeInfo.Architecture = goruntime.GOARCH return nil @@ -472,7 +471,7 @@ func ReadyCondition( recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent localStorageCapacityIsolation bool, ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { // NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions. // This is due to an issue with version skewed kubelet and master components. 
// ref: https://github.com/kubernetes/kubernetes/issues/16961 @@ -557,7 +556,7 @@ func MemoryPressureCondition(nowFunc func() time.Time, // typically Kubelet.cloc pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderMemoryPressure recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { currentTime := metav1.NewTime(nowFunc()) var condition *v1.NodeCondition @@ -618,7 +617,7 @@ func PIDPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock.N pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderPIDPressure recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { currentTime := metav1.NewTime(nowFunc()) var condition *v1.NodeCondition @@ -679,7 +678,7 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock. pressureFunc func() bool, // typically Kubelet.evictionManager.IsUnderDiskPressure recordEventFunc func(eventType, event string), // typically Kubelet.recordNodeStatusEvent ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { currentTime := metav1.NewTime(nowFunc()) var condition *v1.NodeCondition @@ -739,7 +738,7 @@ func DiskPressureCondition(nowFunc func() time.Time, // typically Kubelet.clock. func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.ReconcilerStatesHasBeenSynced volumesInUseFunc func() []v1.UniqueVolumeName, // typically Kubelet.volumeManager.GetVolumesInUse ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { // Make sure to only update node status after reconciler starts syncing up states if syncedFunc() { node.Status.VolumesInUse = volumesInUseFunc() @@ -751,7 +750,7 @@ func VolumesInUse(syncedFunc func() bool, // typically Kubelet.volumeManager.Rec // VolumeLimits returns a Setter that updates the volume limits on the node. func VolumeLimits(volumePluginListFunc func() []volume.VolumePluginWithAttachLimits, // typically Kubelet.volumePluginMgr.ListVolumePluginWithLimits ) Setter { - return func(ctx context.Context, node *v1.Node) error { + return func(node *v1.Node) error { if node.Status.Capacity == nil { node.Status.Capacity = v1.ResourceList{} } diff --git a/pkg/kubelet/nodestatus/setters_test.go b/pkg/kubelet/nodestatus/setters_test.go index f6cb86c0f52..007e4b9385e 100644 --- a/pkg/kubelet/nodestatus/setters_test.go +++ b/pkg/kubelet/nodestatus/setters_test.go @@ -17,7 +17,6 @@ limitations under the License. 
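// Illustrative sketch, not part of the patch: with the context parameter removed,
// a nodestatus Setter is just func(*v1.Node) error, so a caller can hold a plain
// slice of setters built from the constructors above and apply them in order. The
// applySetters helper below is hypothetical (the kubelet keeps its own setter list
// elsewhere); it only shows how the simplified signature composes.
package nodestatussketch

import v1 "k8s.io/api/core/v1"

// Setter mirrors the signature introduced by this change.
type Setter func(node *v1.Node) error

// applySetters applies each setter to the node in order, returning the first error.
func applySetters(node *v1.Node, setters ...Setter) error {
	for _, set := range setters {
		if err := set(node); err != nil {
			return err
		}
	}
	return nil
}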
package nodestatus import ( - "context" "errors" "fmt" "net" @@ -513,7 +512,6 @@ func TestNodeAddress(t *testing.T) { } for _, testCase := range cases { t.Run(testCase.name, func(t *testing.T) { - ctx := context.Background() // testCase setup existingNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ @@ -555,7 +553,7 @@ func TestNodeAddress(t *testing.T) { nodeAddressesFunc) // call setter on existing node - err := setter(ctx, existingNode) + err := setter(existingNode) if err != nil && !testCase.shouldError { t.Fatalf("unexpected error: %v", err) } else if err != nil && testCase.shouldError { @@ -600,7 +598,6 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) { } for _, testCase := range cases { t.Run(testCase.name, func(t *testing.T) { - ctx := context.Background() // testCase setup existingNode := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)}, @@ -627,7 +624,7 @@ func TestNodeAddress_NoCloudProvider(t *testing.T) { nodeAddressesFunc) // call setter on existing node - err := setter(ctx, existingNode) + err := setter(existingNode) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -1052,7 +1049,6 @@ func TestMachineInfo(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() machineInfoFunc := func() (*cadvisorapiv1.MachineInfo, error) { return tc.machineInfo, tc.machineInfoError } @@ -1079,7 +1075,7 @@ func TestMachineInfo(t *testing.T) { setter := MachineInfo(nodeName, tc.maxPods, tc.podsPerCore, machineInfoFunc, capacityFunc, devicePluginResourceCapacityFunc, nodeAllocatableReservationFunc, recordEventFunc, tc.disableLocalStorageCapacityIsolation) // call setter on node - if err := setter(ctx, tc.node); err != nil { + if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected node @@ -1157,20 +1153,19 @@ func TestVersionInfo(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() versionInfoFunc := func() (*cadvisorapiv1.VersionInfo, error) { return tc.versionInfo, tc.versionInfoError } runtimeTypeFunc := func() string { return tc.runtimeType } - runtimeVersionFunc := func(_ context.Context) (kubecontainer.Version, error) { + runtimeVersionFunc := func() (kubecontainer.Version, error) { return tc.runtimeVersion, tc.runtimeVersionError } // construct setter setter := VersionInfo(versionInfoFunc, runtimeTypeFunc, runtimeVersionFunc) // call setter on node - err := setter(ctx, tc.node) + err := setter(tc.node) require.Equal(t, tc.expectError, err) // check expected node assert.True(t, apiequality.Semantic.DeepEqual(tc.expectNode, tc.node), @@ -1234,7 +1229,6 @@ func TestImages(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() imageListFunc := func() ([]kubecontainer.Image, error) { // today, imageListFunc is expected to return a sorted list, // but we may choose to sort in the setter at some future point @@ -1246,7 +1240,7 @@ func TestImages(t *testing.T) { setter := Images(tc.maxImages, imageListFunc) // call setter on node node := &v1.Node{} - err := setter(ctx, node) + err := setter(node) require.Equal(t, tc.expectError, err) // check expected node, image list should be reset to empty when there is an error expectNode := &v1.Node{} @@ -1414,7 +1408,6 @@ func TestReadyCondition(t *testing.T) { } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() runtimeErrorsFunc := func() error 
{ return tc.runtimeErrors } @@ -1440,7 +1433,7 @@ func TestReadyCondition(t *testing.T) { // construct setter setter := ReadyCondition(nowFunc, runtimeErrorsFunc, networkErrorsFunc, storageErrorsFunc, tc.appArmorValidateHostFunc, cmStatusFunc, nodeShutdownErrorsFunc, recordEventFunc, !tc.disableLocalStorageCapacityIsolation) // call setter on node - if err := setter(ctx, tc.node); err != nil { + if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected condition @@ -1548,7 +1541,6 @@ func TestMemoryPressureCondition(t *testing.T) { } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() events := []testEvent{} recordEventFunc := func(eventType, event string) { events = append(events, testEvent{ @@ -1562,7 +1554,7 @@ func TestMemoryPressureCondition(t *testing.T) { // construct setter setter := MemoryPressureCondition(nowFunc, pressureFunc, recordEventFunc) // call setter on node - if err := setter(ctx, tc.node); err != nil { + if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected condition @@ -1670,7 +1662,6 @@ func TestPIDPressureCondition(t *testing.T) { } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() events := []testEvent{} recordEventFunc := func(eventType, event string) { events = append(events, testEvent{ @@ -1684,7 +1675,7 @@ func TestPIDPressureCondition(t *testing.T) { // construct setter setter := PIDPressureCondition(nowFunc, pressureFunc, recordEventFunc) // call setter on node - if err := setter(ctx, tc.node); err != nil { + if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected condition @@ -1792,7 +1783,6 @@ func TestDiskPressureCondition(t *testing.T) { } for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() events := []testEvent{} recordEventFunc := func(eventType, event string) { events = append(events, testEvent{ @@ -1806,7 +1796,7 @@ func TestDiskPressureCondition(t *testing.T) { // construct setter setter := DiskPressureCondition(nowFunc, pressureFunc, recordEventFunc) // call setter on node - if err := setter(ctx, tc.node); err != nil { + if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected condition @@ -1853,7 +1843,6 @@ func TestVolumesInUse(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() syncedFunc := func() bool { return tc.synced } @@ -1863,7 +1852,7 @@ func TestVolumesInUse(t *testing.T) { // construct setter setter := VolumesInUse(syncedFunc, volumesInUseFunc) // call setter on node - if err := setter(ctx, tc.node); err != nil { + if err := setter(tc.node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected volumes @@ -1919,7 +1908,6 @@ func TestVolumeLimits(t *testing.T) { for _, tc := range cases { t.Run(tc.desc, func(t *testing.T) { - ctx := context.Background() volumePluginListFunc := func() []volume.VolumePluginWithAttachLimits { return tc.volumePluginList } @@ -1927,7 +1915,7 @@ func TestVolumeLimits(t *testing.T) { setter := VolumeLimits(volumePluginListFunc) // call setter on node node := &v1.Node{} - if err := setter(ctx, node); err != nil { + if err := setter(node); err != nil { t.Fatalf("unexpected error: %v", err) } // check expected node diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index 749d4b14493..c9ab5bb7689 100644 --- a/pkg/kubelet/pleg/generic.go 
+++ b/pkg/kubelet/pleg/generic.go @@ -17,7 +17,6 @@ limitations under the License. package pleg import ( - "context" "fmt" "sync/atomic" "time" @@ -189,7 +188,6 @@ func (g *GenericPLEG) updateRelistTime(timestamp time.Time) { // relist queries the container runtime for list of pods/containers, compare // with the internal pods/containers, and generates events accordingly. func (g *GenericPLEG) relist() { - ctx := context.Background() klog.V(5).InfoS("GenericPLEG: Relisting") if lastRelistTime := g.getRelistTime(); !lastRelistTime.IsZero() { @@ -202,7 +200,7 @@ func (g *GenericPLEG) relist() { }() // Get all the pods. - podList, err := g.runtime.GetPods(ctx, true) + podList, err := g.runtime.GetPods(true) if err != nil { klog.ErrorS(err, "GenericPLEG: Unable to retrieve pods") return @@ -249,7 +247,7 @@ func (g *GenericPLEG) relist() { // inspecting the pod and getting the PodStatus to update the cache // serially may take a while. We should be aware of this and // parallelize if needed. - if err := g.updateCache(ctx, pod, pid); err != nil { + if err := g.updateCache(pod, pid); err != nil { // Rely on updateCache calling GetPodStatus to log the actual error. klog.V(4).ErrorS(err, "PLEG: Ignoring events for pod", "pod", klog.KRef(pod.Namespace, pod.Name)) @@ -307,7 +305,7 @@ func (g *GenericPLEG) relist() { if len(g.podsToReinspect) > 0 { klog.V(5).InfoS("GenericPLEG: Reinspecting pods that previously failed inspection") for pid, pod := range g.podsToReinspect { - if err := g.updateCache(ctx, pod, pid); err != nil { + if err := g.updateCache(pod, pid); err != nil { // Rely on updateCache calling GetPodStatus to log the actual error. klog.V(5).ErrorS(err, "PLEG: pod failed reinspection", "pod", klog.KRef(pod.Namespace, pod.Name)) needsReinspection[pid] = pod @@ -390,7 +388,7 @@ func (g *GenericPLEG) getPodIPs(pid types.UID, status *kubecontainer.PodStatus) return oldStatus.IPs } -func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, pid types.UID) error { +func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error { if pod == nil { // The pod is missing in the current relist. This means that // the pod has no visible (active or inactive) containers. @@ -402,7 +400,7 @@ func (g *GenericPLEG) updateCache(ctx context.Context, pod *kubecontainer.Pod, p // TODO: Consider adding a new runtime method // GetPodStatus(pod *kubecontainer.Pod) so that Docker can avoid listing // all containers again. - status, err := g.runtime.GetPodStatus(ctx, pod.ID, pod.Name, pod.Namespace) + status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace) if err != nil { // nolint:logcheck // Not using the result of klog.V inside the // if branch is okay, we just use it to determine whether the diff --git a/pkg/kubelet/pleg/generic_test.go b/pkg/kubelet/pleg/generic_test.go index baffabd5906..53f45c9ee1f 100644 --- a/pkg/kubelet/pleg/generic_test.go +++ b/pkg/kubelet/pleg/generic_test.go @@ -17,7 +17,6 @@ limitations under the License. 
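// Illustrative sketch, not part of the patch: after this change the two runtime
// calls that relist depends on no longer take a context, so a test double can be a
// small hand-rolled struct instead of a generated mock. The podLister interface and
// fakePodLister type below are hypothetical and only cover the calls shown in
// generic.go; the real tests keep using containertest.MockRuntime.
package plegsketch

import (
	"k8s.io/apimachinery/pkg/types"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

// podLister is the narrow slice of the runtime that relist actually uses.
type podLister interface {
	GetPods(all bool) ([]*kubecontainer.Pod, error)
	GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error)
}

// fakePodLister returns canned pods and statuses keyed by pod UID.
type fakePodLister struct {
	pods     []*kubecontainer.Pod
	statuses map[types.UID]*kubecontainer.PodStatus
}

var _ podLister = (*fakePodLister)(nil)

func (f *fakePodLister) GetPods(all bool) ([]*kubecontainer.Pod, error) { return f.pods, nil }

func (f *fakePodLister) GetPodStatus(uid types.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
	if s, ok := f.statuses[uid]; ok {
		return s, nil
	}
	return &kubecontainer.PodStatus{ID: uid}, nil
}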
package pleg import ( - "context" "errors" "fmt" "reflect" @@ -351,7 +350,6 @@ func createTestPodsStatusesAndEvents(num int) ([]*kubecontainer.Pod, []*kubecont } func TestRelistWithCache(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() runtimeMock := containertest.NewMockRuntime(mockCtrl) @@ -360,11 +358,11 @@ func TestRelistWithCache(t *testing.T) { ch := pleg.Watch() pods, statuses, events := createTestPodsStatusesAndEvents(2) - runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).AnyTimes() - runtimeMock.EXPECT().GetPodStatus(ctx, pods[0].ID, "", "").Return(statuses[0], nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return(pods, nil).AnyTimes() + runtimeMock.EXPECT().GetPodStatus(pods[0].ID, "", "").Return(statuses[0], nil).Times(1) // Inject an error when querying runtime for the pod status for pods[1]. statusErr := fmt.Errorf("unable to get status") - runtimeMock.EXPECT().GetPodStatus(ctx, pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Times(1) + runtimeMock.EXPECT().GetPodStatus(pods[1].ID, "", "").Return(&kubecontainer.PodStatus{}, statusErr).Times(1) pleg.relist() actualEvents := getEventsFromChannel(ch) @@ -386,7 +384,7 @@ func TestRelistWithCache(t *testing.T) { assert.Exactly(t, []*PodLifecycleEvent{events[0]}, actualEvents) // Return normal status for pods[1]. - runtimeMock.EXPECT().GetPodStatus(ctx, pods[1].ID, "", "").Return(statuses[1], nil).Times(1) + runtimeMock.EXPECT().GetPodStatus(pods[1].ID, "", "").Return(statuses[1], nil).Times(1) pleg.relist() actualEvents = getEventsFromChannel(ch) cases = []struct { @@ -408,20 +406,19 @@ func TestRelistWithCache(t *testing.T) { } func TestRemoveCacheEntry(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() runtimeMock := containertest.NewMockRuntime(mockCtrl) pleg := newTestGenericPLEGWithRuntimeMock(runtimeMock) pods, statuses, _ := createTestPodsStatusesAndEvents(1) - runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1) - runtimeMock.EXPECT().GetPodStatus(ctx, pods[0].ID, "", "").Return(statuses[0], nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1) + runtimeMock.EXPECT().GetPodStatus(pods[0].ID, "", "").Return(statuses[0], nil).Times(1) // Does a relist to populate the cache. pleg.relist() // Delete the pod from runtime. Verify that the cache entry has been // removed after relisting. 
- runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{}, nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{}, nil).Times(1) pleg.relist() actualStatus, actualErr := pleg.cache.Get(pods[0].ID) assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus) @@ -456,7 +453,6 @@ func TestHealthy(t *testing.T) { } func TestRelistWithReinspection(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() runtimeMock := containertest.NewMockRuntime(mockCtrl) @@ -471,13 +467,13 @@ func TestRelistWithReinspection(t *testing.T) { ID: podID, Containers: []*kubecontainer.Container{infraContainer}, }} - runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1) goodStatus := &kubecontainer.PodStatus{ ID: podID, ContainerStatuses: []*kubecontainer.Status{{ID: infraContainer.ID, State: infraContainer.State}}, } - runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(goodStatus, nil).Times(1) + runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(goodStatus, nil).Times(1) goodEvent := &PodLifecycleEvent{ID: podID, Type: ContainerStarted, Data: infraContainer.ID.ID} @@ -496,13 +492,13 @@ func TestRelistWithReinspection(t *testing.T) { ID: podID, Containers: []*kubecontainer.Container{infraContainer, transientContainer}, }} - runtimeMock.EXPECT().GetPods(ctx, true).Return(podsWithTransientContainer, nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return(podsWithTransientContainer, nil).Times(1) badStatus := &kubecontainer.PodStatus{ ID: podID, ContainerStatuses: []*kubecontainer.Status{}, } - runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(badStatus, errors.New("inspection error")).Times(1) + runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(badStatus, errors.New("inspection error")).Times(1) pleg.relist() actualEvents = getEventsFromChannel(ch) @@ -513,8 +509,8 @@ func TestRelistWithReinspection(t *testing.T) { // listing 3 - pretend the transient container has now disappeared, leaving just the infra // container. Make sure the pod is reinspected for its status and the cache is updated. 
- runtimeMock.EXPECT().GetPods(ctx, true).Return(pods, nil).Times(1) - runtimeMock.EXPECT().GetPodStatus(ctx, podID, "", "").Return(goodStatus, nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return(pods, nil).Times(1) + runtimeMock.EXPECT().GetPodStatus(podID, "", "").Return(goodStatus, nil).Times(1) pleg.relist() actualEvents = getEventsFromChannel(ch) @@ -595,7 +591,6 @@ func TestRelistingWithSandboxes(t *testing.T) { } func TestRelistIPChange(t *testing.T) { - ctx := context.Background() testCases := []struct { name string podID string @@ -636,8 +631,8 @@ func TestRelistIPChange(t *testing.T) { } event := &PodLifecycleEvent{ID: pod.ID, Type: ContainerStarted, Data: container.ID.ID} - runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{pod}, nil).Times(1) - runtimeMock.EXPECT().GetPodStatus(ctx, pod.ID, "", "").Return(status, nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{pod}, nil).Times(1) + runtimeMock.EXPECT().GetPodStatus(pod.ID, "", "").Return(status, nil).Times(1) pleg.relist() actualEvents := getEventsFromChannel(ch) @@ -657,8 +652,8 @@ func TestRelistIPChange(t *testing.T) { ContainerStatuses: []*kubecontainer.Status{{ID: container.ID, State: kubecontainer.ContainerStateExited}}, } event = &PodLifecycleEvent{ID: pod.ID, Type: ContainerDied, Data: container.ID.ID} - runtimeMock.EXPECT().GetPods(ctx, true).Return([]*kubecontainer.Pod{pod}, nil).Times(1) - runtimeMock.EXPECT().GetPodStatus(ctx, pod.ID, "", "").Return(status, nil).Times(1) + runtimeMock.EXPECT().GetPods(true).Return([]*kubecontainer.Pod{pod}, nil).Times(1) + runtimeMock.EXPECT().GetPodStatus(pod.ID, "", "").Return(status, nil).Times(1) pleg.relist() actualEvents = getEventsFromChannel(ch) diff --git a/pkg/kubelet/pod_container_deletor.go b/pkg/kubelet/pod_container_deletor.go index c4cecde4a87..975148ce4e5 100644 --- a/pkg/kubelet/pod_container_deletor.go +++ b/pkg/kubelet/pod_container_deletor.go @@ -17,7 +17,6 @@ limitations under the License. package kubelet import ( - "context" "sort" "k8s.io/apimachinery/pkg/util/wait" @@ -49,7 +48,7 @@ func newPodContainerDeletor(runtime kubecontainer.Runtime, containersToKeep int) go wait.Until(func() { for { id := <-buffer - if err := runtime.DeleteContainer(context.Background(), id); err != nil { + if err := runtime.DeleteContainer(id); err != nil { klog.InfoS("DeleteContainer returned error", "containerID", id, "err", err) } } diff --git a/pkg/kubelet/prober/prober.go b/pkg/kubelet/prober/prober.go index 948ba4b22cc..c6009c06efc 100644 --- a/pkg/kubelet/prober/prober.go +++ b/pkg/kubelet/prober/prober.go @@ -17,7 +17,6 @@ limitations under the License. package prober import ( - "context" "fmt" "io" "time" @@ -81,7 +80,7 @@ func (pb *prober) recordContainerEvent(pod *v1.Pod, container *v1.Container, eve } // probe probes the container. 
-func (pb *prober) probe(ctx context.Context, probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) { +func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (results.Result, error) { var probeSpec *v1.Probe switch probeType { case readiness: @@ -99,7 +98,7 @@ func (pb *prober) probe(ctx context.Context, probeType probeType, pod *v1.Pod, s return results.Success, nil } - result, output, err := pb.runProbeWithRetries(ctx, probeType, probeSpec, pod, status, container, containerID, maxProbeRetries) + result, output, err := pb.runProbeWithRetries(probeType, probeSpec, pod, status, container, containerID, maxProbeRetries) if err != nil || (result != probe.Success && result != probe.Warning) { // Probe failed in one way or another. if err != nil { @@ -122,12 +121,12 @@ func (pb *prober) probe(ctx context.Context, probeType probeType, pod *v1.Pod, s // runProbeWithRetries tries to probe the container in a finite loop, it returns the last result // if it never succeeds. -func (pb *prober) runProbeWithRetries(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { +func (pb *prober) runProbeWithRetries(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID, retries int) (probe.Result, string, error) { var err error var result probe.Result var output string for i := 0; i < retries; i++ { - result, output, err = pb.runProbe(ctx, probeType, p, pod, status, container, containerID) + result, output, err = pb.runProbe(probeType, p, pod, status, container, containerID) if err == nil { return result, output, nil } @@ -135,12 +134,12 @@ func (pb *prober) runProbeWithRetries(ctx context.Context, probeType probeType, return result, output, err } -func (pb *prober) runProbe(ctx context.Context, probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { +func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) { timeout := time.Duration(p.TimeoutSeconds) * time.Second if p.Exec != nil { klog.V(4).InfoS("Exec-Probe runProbe", "pod", klog.KObj(pod), "containerName", container.Name, "execCommand", p.Exec.Command) command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env) - return pb.exec.Probe(pb.newExecInContainer(ctx, container, containerID, command, timeout)) + return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout)) } if p.HTTPGet != nil { req, err := httpprobe.NewRequestForHTTPGetAction(p.HTTPGet, &container, status.PodIP, "probe") @@ -188,9 +187,9 @@ type execInContainer struct { writer io.Writer } -func (pb *prober) newExecInContainer(ctx context.Context, container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { +func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { return &execInContainer{run: func() ([]byte, error) { - return pb.runner.RunInContainer(ctx, containerID, cmd, timeout) + 
return pb.runner.RunInContainer(containerID, cmd, timeout) }} } diff --git a/pkg/kubelet/prober/prober_test.go b/pkg/kubelet/prober/prober_test.go index c9e1a316e57..6e5e936a93d 100644 --- a/pkg/kubelet/prober/prober_test.go +++ b/pkg/kubelet/prober/prober_test.go @@ -18,7 +18,6 @@ package prober import ( "bytes" - "context" "errors" "fmt" "reflect" @@ -133,7 +132,6 @@ func TestGetTCPAddrParts(t *testing.T) { } func TestProbe(t *testing.T) { - ctx := context.Background() containerID := kubecontainer.ContainerID{Type: "test", ID: "foobar"} execProbe := &v1.Probe{ @@ -236,7 +234,7 @@ func TestProbe(t *testing.T) { prober.exec = fakeExecProber{test.execResult, nil} } - result, err := prober.probe(ctx, probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) + result, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) if test.expectError && err == nil { t.Errorf("[%s] Expected probe error but no error was returned.", testID) } @@ -250,7 +248,7 @@ func TestProbe(t *testing.T) { if len(test.expectCommand) > 0 { prober.exec = execprobe.New() prober.runner = &containertest.FakeContainerCommandRunner{} - _, err := prober.probe(ctx, probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) + _, err := prober.probe(probeType, &v1.Pod{}, v1.PodStatus{}, testContainer, containerID) if err != nil { t.Errorf("[%s] Didn't expect probe error but got: %v", testID, err) continue @@ -264,7 +262,6 @@ func TestProbe(t *testing.T) { } func TestNewExecInContainer(t *testing.T) { - ctx := context.Background() limit := 1024 tenKilobyte := strings.Repeat("logs-123", 128*10) @@ -306,7 +303,7 @@ func TestNewExecInContainer(t *testing.T) { container := v1.Container{} containerID := kubecontainer.ContainerID{Type: "docker", ID: "containerID"} cmd := []string{"/foo", "bar"} - exec := prober.newExecInContainer(ctx, container, containerID, cmd, 0) + exec := prober.newExecInContainer(container, containerID, cmd, 0) var dataBuffer bytes.Buffer writer := ioutils.LimitWriter(&dataBuffer, int64(limit)) diff --git a/pkg/kubelet/prober/worker.go b/pkg/kubelet/prober/worker.go index b9ec0053de6..75273deadba 100644 --- a/pkg/kubelet/prober/worker.go +++ b/pkg/kubelet/prober/worker.go @@ -17,7 +17,6 @@ limitations under the License. package prober import ( - "context" "fmt" "math/rand" "strings" @@ -149,7 +148,6 @@ func newWorker( // run periodically probes the container. func (w *worker) run() { - ctx := context.Background() probeTickerPeriod := time.Duration(w.spec.PeriodSeconds) * time.Second // If kubelet restarted the probes could be started in rapid succession. @@ -177,7 +175,7 @@ func (w *worker) run() { }() probeLoop: - for w.doProbe(ctx) { + for w.doProbe() { // Wait for next probe tick. select { case <-w.stopCh: @@ -200,7 +198,7 @@ func (w *worker) stop() { // doProbe probes the container once and records the result. // Returns whether the worker should continue. 
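// Illustrative sketch, not part of the patch: newExecInContainer above now closes
// over everything the probe needs (container ID, command, timeout), so the value
// handed to the exec prober carries no context at all. The cmdFunc type below is a
// hypothetical, minimal version of that closure-capture pattern; the real code
// returns a type satisfying the exec.Cmd interface used above.
package probersketch

// runner is the narrow contract this sketch needs: run a command, return its output.
type runner interface {
	CombinedOutput() ([]byte, error)
}

// cmdFunc adapts a plain closure to the runner interface.
type cmdFunc func() ([]byte, error)

func (f cmdFunc) CombinedOutput() ([]byte, error) { return f() }

// newCommand captures its arguments in the closure, mirroring newExecInContainer.
func newCommand(run func(containerID string, cmd []string) ([]byte, error), containerID string, cmd []string) runner {
	return cmdFunc(func() ([]byte, error) { return run(containerID, cmd) })
}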
-func (w *worker) doProbe(ctx context.Context) (keepGoing bool) { +func (w *worker) doProbe() (keepGoing bool) { defer func() { recover() }() // Actually eat panics (HandleCrash takes care of logging) defer runtime.HandleCrash(func(_ interface{}) { keepGoing = true }) @@ -286,7 +284,7 @@ func (w *worker) doProbe(ctx context.Context) (keepGoing bool) { } // Note, exec probe does NOT have access to pod environment variables or downward API - result, err := w.probeManager.prober.probe(ctx, w.probeType, w.pod, status, w.container, w.containerID) + result, err := w.probeManager.prober.probe(w.probeType, w.pod, status, w.container, w.containerID) if err != nil { // Prober error, throw away the result. return true diff --git a/pkg/kubelet/prober/worker_test.go b/pkg/kubelet/prober/worker_test.go index c4e0feb64fd..e86819fca88 100644 --- a/pkg/kubelet/prober/worker_test.go +++ b/pkg/kubelet/prober/worker_test.go @@ -17,7 +17,6 @@ limitations under the License. package prober import ( - "context" "fmt" "testing" "time" @@ -131,7 +130,6 @@ func TestDoProbe(t *testing.T) { } for i, test := range tests { - ctx := context.Background() w := newTestWorker(m, probeType, test.probe) if test.podStatus != nil { m.statusManager.SetPodStatus(w.pod, *test.podStatus) @@ -140,7 +138,7 @@ func TestDoProbe(t *testing.T) { now := metav1.Now() w.pod.ObjectMeta.DeletionTimestamp = &now } - if c := w.doProbe(ctx); c != test.expectContinue[probeType.String()] { + if c := w.doProbe(); c != test.expectContinue[probeType.String()] { t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue[probeType.String()], c) } result, ok := resultsManager(m, probeType).Get(testContainerID) @@ -159,7 +157,6 @@ func TestDoProbe(t *testing.T) { } func TestInitialDelay(t *testing.T) { - ctx := context.Background() m := newTestManager() for _, probeType := range [...]probeType{liveness, readiness, startup} { @@ -168,7 +165,7 @@ func TestInitialDelay(t *testing.T) { }) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(probeType != startup)) - expectContinue(t, w, w.doProbe(ctx), "during initial delay") + expectContinue(t, w, w.doProbe(), "during initial delay") // Default value depends on probe, Success for liveness, Failure for readiness, Unknown for startup switch probeType { case liveness: @@ -186,13 +183,12 @@ func TestInitialDelay(t *testing.T) { m.statusManager.SetPodStatus(w.pod, laterStatus) // Second call should succeed (already waited). - expectContinue(t, w, w.doProbe(ctx), "after initial delay") + expectContinue(t, w, w.doProbe(), "after initial delay") expectResult(t, w, results.Success, "after initial delay") } } func TestFailureThreshold(t *testing.T) { - ctx := context.Background() m := newTestManager() w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) @@ -203,7 +199,7 @@ func TestFailureThreshold(t *testing.T) { for j := 0; j < 3; j++ { msg := fmt.Sprintf("%d success (%d)", j+1, i) - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) } @@ -213,21 +209,20 @@ func TestFailureThreshold(t *testing.T) { // Next 2 probes should still be "success". for j := 0; j < 2; j++ { msg := fmt.Sprintf("%d failing (%d)", j+1, i) - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) } // Third & following fail. 
for j := 0; j < 3; j++ { msg := fmt.Sprintf("%d failure (%d)", j+3, i) - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) } } } func TestSuccessThreshold(t *testing.T) { - ctx := context.Background() m := newTestManager() w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) @@ -239,21 +234,21 @@ func TestSuccessThreshold(t *testing.T) { // Probe defaults to Failure. for j := 0; j < 2; j++ { msg := fmt.Sprintf("%d success (%d)", j+1, i) - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) } // Continuing success! for j := 0; j < 3; j++ { msg := fmt.Sprintf("%d success (%d)", j+3, i) - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) } // Prober flakes :( m.prober.exec = fakeExecProber{probe.Failure, nil} msg := fmt.Sprintf("1 failure (%d)", i) - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) // Back to success. @@ -327,7 +322,6 @@ func resultsManager(m *manager, probeType probeType) results.Manager { } func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) { - ctx := context.Background() m := newTestManager() for _, probeType := range [...]probeType{liveness, startup} { @@ -338,7 +332,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) { // First probe should fail. m.prober.exec = fakeExecProber{probe.Failure, nil} msg := "first probe" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) if !w.onHold { t.Errorf("Prober should be on hold due to %s check failure", probeType) @@ -347,7 +341,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) { // failure because the worker is on hold and won't probe. 
m.prober.exec = fakeExecProber{probe.Success, nil} msg = "while on hold" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) if !w.onHold { t.Errorf("Prober should be on hold due to %s check failure", probeType) @@ -357,7 +351,7 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) { status.ContainerStatuses[0].ContainerID = "test://newCont_ID" m.statusManager.SetPodStatus(w.pod, status) msg = "hold lifted" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) if w.onHold { t.Errorf("Prober should not be on hold anymore") @@ -366,14 +360,13 @@ func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) { } func TestResultRunOnLivenessCheckFailure(t *testing.T) { - ctx := context.Background() m := newTestManager() w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatus()) m.prober.exec = fakeExecProber{probe.Success, nil} msg := "initial probe success" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) if w.resultRun != 1 { t.Errorf("Prober resultRun should be 1") @@ -381,7 +374,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) { m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "probe failure, result success" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) if w.resultRun != 1 { t.Errorf("Prober resultRun should be 1") @@ -389,7 +382,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) { m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "2nd probe failure, result success" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) if w.resultRun != 2 { t.Errorf("Prober resultRun should be 2") @@ -400,7 +393,7 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) { // also gets FailureThreshold attempts to succeed. m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "3rd probe failure, result failure" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) if w.resultRun != 0 { t.Errorf("Prober resultRun should be reset to 0") @@ -408,7 +401,6 @@ func TestResultRunOnLivenessCheckFailure(t *testing.T) { } func TestResultRunOnStartupCheckFailure(t *testing.T) { - ctx := context.Background() m := newTestManager() w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false)) @@ -417,7 +409,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) { // which is failed for startup at first. 
m.prober.exec = fakeExecProber{probe.Failure, nil} msg := "probe failure, result unknown" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Unknown, msg) if w.resultRun != 1 { t.Errorf("Prober resultRun should be 1") @@ -425,7 +417,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) { m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "2nd probe failure, result unknown" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Unknown, msg) if w.resultRun != 2 { t.Errorf("Prober resultRun should be 2") @@ -436,7 +428,7 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) { // also gets FailureThreshold attempts to succeed. m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "3rd probe failure, result failure" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) if w.resultRun != 0 { t.Errorf("Prober resultRun should be reset to 0") @@ -444,45 +436,43 @@ func TestResultRunOnStartupCheckFailure(t *testing.T) { } func TestLivenessProbeDisabledByStarted(t *testing.T) { - ctx := context.Background() m := newTestManager() w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false)) // livenessProbe fails, but is disabled m.prober.exec = fakeExecProber{probe.Failure, nil} msg := "Not started, probe failure, result success" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) // setting started state m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true) // livenessProbe fails m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "Started, probe failure, result failure" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Failure, msg) } func TestStartupProbeDisabledByStarted(t *testing.T) { - ctx := context.Background() m := newTestManager() w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 2}) m.statusManager.SetPodStatus(w.pod, getTestRunningStatusWithStarted(false)) // startupProbe fails < FailureThreshold, stays unknown m.prober.exec = fakeExecProber{probe.Failure, nil} msg := "Not started, probe failure, result unknown" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Unknown, msg) // startupProbe succeeds m.prober.exec = fakeExecProber{probe.Success, nil} msg = "Started, probe success, result success" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) // setting started state m.statusManager.SetContainerStartup(w.pod.UID, w.containerID, true) // startupProbe fails, but is disabled m.prober.exec = fakeExecProber{probe.Failure, nil} msg = "Started, probe failure, result success" - expectContinue(t, w, w.doProbe(ctx), msg) + expectContinue(t, w, w.doProbe(), msg) expectResult(t, w, results.Success, msg) } diff --git a/pkg/kubelet/runonce.go b/pkg/kubelet/runonce.go index 3c8d14c9e5a..00f3022af5a 100644 --- a/pkg/kubelet/runonce.go +++ b/pkg/kubelet/runonce.go @@ -44,7 +44,6 @@ type RunPodResult struct { // RunOnce polls from one configuration update and run the associated pods. 
func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, error) { - ctx := context.Background() // Setup filesystem directories. if err := kl.setupDataDirs(); err != nil { return nil, err @@ -60,7 +59,7 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, select { case u := <-updates: klog.InfoS("Processing manifest with pods", "numPods", len(u.Pods)) - result, err := kl.runOnce(ctx, u.Pods, runOnceRetryDelay) + result, err := kl.runOnce(u.Pods, runOnceRetryDelay) klog.InfoS("Finished processing pods", "numPods", len(u.Pods)) return result, err case <-time.After(runOnceManifestDelay): @@ -69,7 +68,7 @@ func (kl *Kubelet) RunOnce(updates <-chan kubetypes.PodUpdate) ([]RunPodResult, } // runOnce runs a given set of pods and returns their status. -func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { +func (kl *Kubelet) runOnce(pods []*v1.Pod, retryDelay time.Duration) (results []RunPodResult, err error) { ch := make(chan RunPodResult) admitted := []*v1.Pod{} for _, pod := range pods { @@ -82,7 +81,7 @@ func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time. admitted = append(admitted, pod) go func(pod *v1.Pod) { - err := kl.runPod(ctx, pod, retryDelay) + err := kl.runPod(pod, retryDelay) ch <- RunPodResult{pod, err} }(pod) } @@ -93,7 +92,7 @@ func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time. res := <-ch results = append(results, res) if res.Err != nil { - failedContainerName, err := kl.getFailedContainers(ctx, res.Pod) + failedContainerName, err := kl.getFailedContainers(res.Pod) if err != nil { klog.InfoS("Unable to get failed containers' names for pod", "pod", klog.KObj(res.Pod), "err", err) } else { @@ -112,12 +111,12 @@ func (kl *Kubelet) runOnce(ctx context.Context, pods []*v1.Pod, retryDelay time. } // runPod runs a single pod and wait until all containers are running. -func (kl *Kubelet) runPod(ctx context.Context, pod *v1.Pod, retryDelay time.Duration) error { +func (kl *Kubelet) runPod(pod *v1.Pod, retryDelay time.Duration) error { var isTerminal bool delay := retryDelay retry := 0 for !isTerminal { - status, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) + status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { return fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err) } @@ -133,7 +132,7 @@ func (kl *Kubelet) runPod(ctx context.Context, pod *v1.Pod, retryDelay time.Dura klog.ErrorS(err, "Failed creating a mirror pod", "pod", klog.KObj(pod)) } mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod) - if isTerminal, err = kl.syncPod(ctx, kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil { + if isTerminal, err = kl.syncPod(context.Background(), kubetypes.SyncPodUpdate, pod, mirrorPod, status); err != nil { return fmt.Errorf("error syncing pod %q: %v", format.Pod(pod), err) } if retry >= runOnceMaxRetries { @@ -161,8 +160,8 @@ func (kl *Kubelet) isPodRunning(pod *v1.Pod, status *kubecontainer.PodStatus) bo } // getFailedContainer returns failed container name for pod. 
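// Illustrative sketch, not part of the patch: runOnce above fans out one goroutine
// per admitted pod and collects a RunPodResult per pod from a shared channel. The
// runAll helper below is a hypothetical, generic rendering of that fan-out/collect
// shape; it is not the kubelet's implementation.
package runoncesketch

// result pairs an item with the error produced while processing it.
type result[T any] struct {
	item T
	err  error
}

// runAll processes every item concurrently and returns one result per item.
func runAll[T any](items []T, process func(T) error) []result[T] {
	ch := make(chan result[T])
	for _, it := range items {
		go func(it T) { ch <- result[T]{item: it, err: process(it)} }(it)
	}
	out := make([]result[T], 0, len(items))
	for range items {
		out = append(out, <-ch)
	}
	return out
}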
-func (kl *Kubelet) getFailedContainers(ctx context.Context, pod *v1.Pod) ([]string, error) { - status, err := kl.containerRuntime.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) +func (kl *Kubelet) getFailedContainers(pod *v1.Pod) ([]string, error) { + status, err := kl.containerRuntime.GetPodStatus(pod.UID, pod.Name, pod.Namespace) if err != nil { return nil, fmt.Errorf("unable to get status for pod %q: %v", format.Pod(pod), err) } diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 7f73d6c75a7..c16f11b0f40 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -17,7 +17,6 @@ limitations under the License. package kubelet import ( - "context" "os" "testing" "time" @@ -53,7 +52,6 @@ import ( ) func TestRunOnce(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() @@ -170,7 +168,7 @@ func TestRunOnce(t *testing.T) { }, }, } - results, err := kb.runOnce(ctx, pods, time.Millisecond) + results, err := kb.runOnce(pods, time.Millisecond) if err != nil { t.Errorf("unexpected error: %v", err) } diff --git a/pkg/kubelet/server/server.go b/pkg/kubelet/server/server.go index cae302289dc..4b3b096191d 100644 --- a/pkg/kubelet/server/server.go +++ b/pkg/kubelet/server/server.go @@ -239,17 +239,17 @@ type HostInterface interface { stats.Provider GetVersionInfo() (*cadvisorapi.VersionInfo, error) GetCachedMachineInfo() (*cadvisorapi.MachineInfo, error) - GetRunningPods(ctx context.Context) ([]*v1.Pod, error) - RunInContainer(ctx context.Context, name string, uid types.UID, container string, cmd []string) ([]byte, error) - CheckpointContainer(ctx context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error + GetRunningPods() ([]*v1.Pod, error) + RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error) + CheckpointContainer(podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error GetKubeletContainerLogs(ctx context.Context, podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error ServeLogs(w http.ResponseWriter, req *http.Request) ResyncInterval() time.Duration GetHostname() string LatestLoopEntryTime() time.Time - GetExec(ctx context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) - GetAttach(ctx context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) - GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) + GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) + GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) + GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) } // NewServer initializes and configures a kubelet.Server object to handle HTTP requests. @@ -740,8 +740,7 @@ func (s *Server) getPods(request *restful.Request, response *restful.Response) { // provided by the container runtime, and is different from the list returned // by getPods, which is a set of desired pods to run. 
func (s *Server) getRunningPods(request *restful.Request, response *restful.Response) { - ctx := request.Request.Context() - pods, err := s.host.GetRunningPods(ctx) + pods, err := s.host.GetRunningPods() if err != nil { response.WriteError(http.StatusInternalServerError, err) return @@ -821,7 +820,7 @@ func (s *Server) getAttach(request *restful.Request, response *restful.Response) } podFullName := kubecontainer.GetPodFullName(pod) - url, err := s.host.GetAttach(request.Request.Context(), podFullName, params.podUID, params.containerName, *streamOpts) + url, err := s.host.GetAttach(podFullName, params.podUID, params.containerName, *streamOpts) if err != nil { streaming.WriteError(err, response.ResponseWriter) return @@ -846,7 +845,7 @@ func (s *Server) getExec(request *restful.Request, response *restful.Response) { } podFullName := kubecontainer.GetPodFullName(pod) - url, err := s.host.GetExec(request.Request.Context(), podFullName, params.podUID, params.containerName, params.cmd, *streamOpts) + url, err := s.host.GetExec(podFullName, params.podUID, params.containerName, params.cmd, *streamOpts) if err != nil { streaming.WriteError(err, response.ResponseWriter) return @@ -865,7 +864,7 @@ func (s *Server) getRun(request *restful.Request, response *restful.Response) { // For legacy reasons, run uses different query param than exec. params.cmd = strings.Split(request.QueryParameter("cmd"), " ") - data, err := s.host.RunInContainer(request.Request.Context(), kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd) + data, err := s.host.RunInContainer(kubecontainer.GetPodFullName(pod), params.podUID, params.containerName, params.cmd) if err != nil { response.WriteError(http.StatusInternalServerError, err) return @@ -908,7 +907,7 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp return } - url, err := s.host.GetPortForward(request.Request.Context(), pod.Name, pod.Namespace, pod.UID, *portForwardOptions) + url, err := s.host.GetPortForward(pod.Name, pod.Namespace, pod.UID, *portForwardOptions) if err != nil { streaming.WriteError(err, response.ResponseWriter) return @@ -920,7 +919,6 @@ func (s *Server) getPortForward(request *restful.Request, response *restful.Resp // podNamespace, pod and container actually exist and only then calls out // to the runtime to actually checkpoint the container. 
func (s *Server) checkpoint(request *restful.Request, response *restful.Response) { - ctx := request.Request.Context() pod, ok := s.host.GetPodByName(request.PathParameter("podNamespace"), request.PathParameter("podID")) if !ok { response.WriteError(http.StatusNotFound, fmt.Errorf("pod does not exist")) @@ -975,7 +973,7 @@ func (s *Server) checkpoint(request *restful.Request, response *restful.Response options.Timeout = timeout } - if err := s.host.CheckpointContainer(ctx, pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil { + if err := s.host.CheckpointContainer(pod.UID, kubecontainer.GetPodFullName(pod), containerName, options); err != nil { response.WriteError( http.StatusInternalServerError, fmt.Errorf( diff --git a/pkg/kubelet/server/server_test.go b/pkg/kubelet/server/server_test.go index 61dc349626f..9c3a15fb10d 100644 --- a/pkg/kubelet/server/server_test.go +++ b/pkg/kubelet/server/server_test.go @@ -74,11 +74,11 @@ const ( type fakeKubelet struct { podByNameFunc func(namespace, name string) (*v1.Pod, bool) - containerInfoFunc func(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) rawInfoFunc func(query *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) machineInfoFunc func() (*cadvisorapi.MachineInfo, error) podsFunc func() []*v1.Pod - runningPodsFunc func(ctx context.Context) ([]*v1.Pod, error) + runningPodsFunc func() ([]*v1.Pod, error) logFunc func(w http.ResponseWriter, req *http.Request) runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) getExecCheck func(string, types.UID, string, []string, remotecommandserver.Options) @@ -109,8 +109,8 @@ func (fk *fakeKubelet) GetRequestedContainersInfo(containerName string, options return map[string]*cadvisorapi.ContainerInfo{}, nil } -func (fk *fakeKubelet) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { - return fk.containerInfoFunc(ctx, podFullName, uid, containerName, req) +func (fk *fakeKubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return fk.containerInfoFunc(podFullName, uid, containerName, req) } func (fk *fakeKubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) { @@ -129,8 +129,8 @@ func (fk *fakeKubelet) GetPods() []*v1.Pod { return fk.podsFunc() } -func (fk *fakeKubelet) GetRunningPods(ctx context.Context) ([]*v1.Pod, error) { - return fk.runningPodsFunc(ctx) +func (fk *fakeKubelet) GetRunningPods() ([]*v1.Pod, error) { + return fk.runningPodsFunc() } func (fk *fakeKubelet) ServeLogs(w http.ResponseWriter, req *http.Request) { @@ -145,11 +145,11 @@ func (fk *fakeKubelet) GetHostname() string { return fk.hostnameFunc() } -func (fk *fakeKubelet) RunInContainer(_ context.Context, podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) { +func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) { return fk.runFunc(podFullName, uid, 
containerName, cmd) } -func (fk *fakeKubelet) CheckpointContainer(_ context.Context, podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error { +func (fk *fakeKubelet) CheckpointContainer(podUID types.UID, podFullName, containerName string, options *runtimeapi.CheckpointContainerRequest) error { if containerName == "checkpointingFailure" { return fmt.Errorf("Returning error for test") } @@ -162,15 +162,15 @@ type fakeRuntime struct { portForwardFunc func(string, int32, io.ReadWriteCloser) error } -func (f *fakeRuntime) Exec(_ context.Context, containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { +func (f *fakeRuntime) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { return f.execFunc(containerID, cmd, stdin, stdout, stderr, tty, resize) } -func (f *fakeRuntime) Attach(_ context.Context, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { +func (f *fakeRuntime) Attach(containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { return f.attachFunc(containerID, stdin, stdout, stderr, tty, resize) } -func (f *fakeRuntime) PortForward(_ context.Context, podSandboxID string, port int32, stream io.ReadWriteCloser) error { +func (f *fakeRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { return f.portForwardFunc(podSandboxID, port, stream) } @@ -209,7 +209,7 @@ func newTestStreamingServer(streamIdleTimeout time.Duration) (s *testStreamingSe return s, nil } -func (fk *fakeKubelet) GetExec(_ context.Context, podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { +func (fk *fakeKubelet) GetExec(podFullName string, podUID types.UID, containerName string, cmd []string, streamOpts remotecommandserver.Options) (*url.URL, error) { if fk.getExecCheck != nil { fk.getExecCheck(podFullName, podUID, containerName, cmd, streamOpts) } @@ -228,7 +228,7 @@ func (fk *fakeKubelet) GetExec(_ context.Context, podFullName string, podUID typ return url.Parse(resp.GetUrl()) } -func (fk *fakeKubelet) GetAttach(_ context.Context, podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { +func (fk *fakeKubelet) GetAttach(podFullName string, podUID types.UID, containerName string, streamOpts remotecommandserver.Options) (*url.URL, error) { if fk.getAttachCheck != nil { fk.getAttachCheck(podFullName, podUID, containerName, streamOpts) } @@ -246,7 +246,7 @@ func (fk *fakeKubelet) GetAttach(_ context.Context, podFullName string, podUID t return url.Parse(resp.GetUrl()) } -func (fk *fakeKubelet) GetPortForward(ctx context.Context, podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { +func (fk *fakeKubelet) GetPortForward(podName, podNamespace string, podUID types.UID, portForwardOpts portforward.V4Options) (*url.URL, error) { if fk.getPortForwardCheck != nil { fk.getPortForwardCheck(podName, podNamespace, podUID, portForwardOpts) } @@ -272,16 +272,14 @@ func (fk *fakeKubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Vo func (*fakeKubelet) ListBlockVolumesForPod(podUID types.UID) (map[string]volume.BlockVolume, 
bool) { return map[string]volume.BlockVolume{}, true } -func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil } -func (*fakeKubelet) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) { return nil, nil } -func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage(_ context.Context) ([]statsapi.PodStats, error) { +func (*fakeKubelet) RootFsStats() (*statsapi.FsStats, error) { return nil, nil } +func (*fakeKubelet) ListPodStats() ([]statsapi.PodStats, error) { return nil, nil } +func (*fakeKubelet) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { return nil, nil } -func (*fakeKubelet) ListPodCPUAndMemoryStats(_ context.Context) ([]statsapi.PodStats, error) { - return nil, nil -} -func (*fakeKubelet) ImageFsStats(_ context.Context) (*statsapi.FsStats, error) { return nil, nil } -func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil } +func (*fakeKubelet) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { return nil, nil } +func (*fakeKubelet) ImageFsStats() (*statsapi.FsStats, error) { return nil, nil } +func (*fakeKubelet) RlimitStats() (*statsapi.RlimitStats, error) { return nil, nil } func (*fakeKubelet) GetCgroupStats(cgroupName string, updateStats bool) (*statsapi.ContainerStats, *statsapi.NetworkStats, error) { return nil, nil, nil } diff --git a/pkg/kubelet/server/stats/handler.go b/pkg/kubelet/server/stats/handler.go index 24acdbcb093..315d963c5f7 100644 --- a/pkg/kubelet/server/stats/handler.go +++ b/pkg/kubelet/server/stats/handler.go @@ -18,7 +18,6 @@ limitations under the License. package stats import ( - "context" "fmt" "net/http" @@ -27,7 +26,7 @@ import ( cadvisorv2 "github.com/google/cadvisor/info/v2" "k8s.io/klog/v2" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1" "k8s.io/kubernetes/pkg/kubelet/cm" @@ -40,18 +39,18 @@ type Provider interface { // The following stats are provided by either CRI or cAdvisor. // // ListPodStats returns the stats of all the containers managed by pods. - ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) + ListPodStats() ([]statsapi.PodStats, error) // ListPodCPUAndMemoryStats returns the CPU and memory stats of // all the pod-managed containers. - ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) + ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) // ListPodStatsAndUpdateCPUNanoCoreUsage returns the stats of all the // containers managed by pods and force update the cpu usageNanoCores. // This is a workaround for CRI runtimes that do not integrate with // cadvisor. See https://github.com/kubernetes/kubernetes/issues/72788 // for more details. - ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) + ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) // ImageFsStats returns the stats of the image filesystem. - ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) + ImageFsStats() (*statsapi.FsStats, error) // The following stats are provided by cAdvisor. // @@ -68,7 +67,7 @@ type Provider interface { // // GetContainerInfo returns the information of the container with the // containerName managed by the pod with the uid.
- GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) + GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) // GetRawContainerInfo returns the information of the container with the // containerName. If subcontainers is true, this function will return the // information of all the sub-containers as well. @@ -141,7 +140,6 @@ func CreateHandlers(rootPath string, provider Provider, summaryProvider SummaryP // Handles stats summary requests to /stats/summary // If "only_cpu_and_memory" GET param is true then only cpu and memory is returned in response. func (h *handler) handleSummary(request *restful.Request, response *restful.Response) { - ctx := request.Request.Context() onlyCPUAndMemory := false err := request.Request.ParseForm() if err != nil { @@ -154,11 +152,11 @@ func (h *handler) handleSummary(request *restful.Request, response *restful.Resp } var summary *statsapi.Summary if onlyCPUAndMemory { - summary, err = h.summaryProvider.GetCPUAndMemoryStats(ctx) + summary, err = h.summaryProvider.GetCPUAndMemoryStats() } else { // external calls to the summary API use cached stats forceStatsUpdate := false - summary, err = h.summaryProvider.Get(ctx, forceStatsUpdate) + summary, err = h.summaryProvider.Get(forceStatsUpdate) } if err != nil { handleError(response, "/stats/summary", err) diff --git a/pkg/kubelet/server/stats/summary.go b/pkg/kubelet/server/stats/summary.go index fb0719b8ab0..297b2bf27fa 100644 --- a/pkg/kubelet/server/stats/summary.go +++ b/pkg/kubelet/server/stats/summary.go @@ -18,7 +18,6 @@ limitations under the License. package stats import ( - "context" "fmt" "k8s.io/klog/v2" @@ -32,9 +31,9 @@ import ( type SummaryProvider interface { // Get provides a new Summary with the stats from Kubelet, // and will update some stats if updateStats is true - Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) + Get(updateStats bool) (*statsapi.Summary, error) // GetCPUAndMemoryStats provides a new Summary with the CPU and memory stats from Kubelet, - GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) + GetCPUAndMemoryStats() (*statsapi.Summary, error) } // summaryProviderImpl implements the SummaryProvider interface. @@ -66,7 +65,7 @@ func NewSummaryProvider(statsProvider Provider) SummaryProvider { } } -func (sp *summaryProviderImpl) Get(ctx context.Context, updateStats bool) (*statsapi.Summary, error) { +func (sp *summaryProviderImpl) Get(updateStats bool) (*statsapi.Summary, error) { // TODO(timstclair): Consider returning a best-effort response if any of // the following errors occur. 
node, err := sp.provider.GetNode() @@ -82,15 +81,15 @@ func (sp *summaryProviderImpl) Get(ctx context.Context, updateStats bool) (*stat if err != nil { return nil, fmt.Errorf("failed to get rootFs stats: %v", err) } - imageFsStats, err := sp.provider.ImageFsStats(ctx) + imageFsStats, err := sp.provider.ImageFsStats() if err != nil { return nil, fmt.Errorf("failed to get imageFs stats: %v", err) } var podStats []statsapi.PodStats if updateStats { - podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage(ctx) + podStats, err = sp.provider.ListPodStatsAndUpdateCPUNanoCoreUsage() } else { - podStats, err = sp.provider.ListPodStats(ctx) + podStats, err = sp.provider.ListPodStats() } if err != nil { return nil, fmt.Errorf("failed to list pod stats: %v", err) @@ -119,7 +118,7 @@ func (sp *summaryProviderImpl) Get(ctx context.Context, updateStats bool) (*stat return &summary, nil } -func (sp *summaryProviderImpl) GetCPUAndMemoryStats(ctx context.Context) (*statsapi.Summary, error) { +func (sp *summaryProviderImpl) GetCPUAndMemoryStats() (*statsapi.Summary, error) { // TODO(timstclair): Consider returning a best-effort response if any of // the following errors occur. node, err := sp.provider.GetNode() @@ -132,7 +131,7 @@ func (sp *summaryProviderImpl) GetCPUAndMemoryStats(ctx context.Context) (*stats return nil, fmt.Errorf("failed to get root cgroup stats: %v", err) } - podStats, err := sp.provider.ListPodCPUAndMemoryStats(ctx) + podStats, err := sp.provider.ListPodCPUAndMemoryStats() if err != nil { return nil, fmt.Errorf("failed to list pod stats: %v", err) } diff --git a/pkg/kubelet/server/stats/summary_test.go b/pkg/kubelet/server/stats/summary_test.go index af5525d2e4a..dae4638f241 100644 --- a/pkg/kubelet/server/stats/summary_test.go +++ b/pkg/kubelet/server/stats/summary_test.go @@ -20,7 +20,6 @@ limitations under the License. 
package stats import ( - "context" "testing" "time" @@ -49,7 +48,6 @@ var ( ) func TestSummaryProviderGetStats(t *testing.T) { - ctx := context.Background() assert := assert.New(t) podStats := []statsapi.PodStats{ @@ -79,9 +77,9 @@ func TestSummaryProviderGetStats(t *testing.T) { mockStatsProvider.EXPECT().GetNode().Return(node, nil) mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig) mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot) - mockStatsProvider.EXPECT().ListPodStats(ctx).Return(podStats, nil).AnyTimes() - mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage(ctx).Return(podStats, nil) - mockStatsProvider.EXPECT().ImageFsStats(ctx).Return(imageFsStats, nil) + mockStatsProvider.EXPECT().ListPodStats().Return(podStats, nil).AnyTimes() + mockStatsProvider.EXPECT().ListPodStatsAndUpdateCPUNanoCoreUsage().Return(podStats, nil) + mockStatsProvider.EXPECT().ImageFsStats().Return(imageFsStats, nil) mockStatsProvider.EXPECT().RootFsStats().Return(rootFsStats, nil) mockStatsProvider.EXPECT().RlimitStats().Return(rlimitStats, nil) mockStatsProvider.EXPECT().GetCgroupStats("/", true).Return(cgroupStatsMap["/"].cs, cgroupStatsMap["/"].ns, nil) @@ -93,7 +91,7 @@ func TestSummaryProviderGetStats(t *testing.T) { kubeletCreationTime := metav1.Now() systemBootTime := metav1.Now() provider := summaryProviderImpl{kubeletCreationTime: kubeletCreationTime, systemBootTime: systemBootTime, provider: mockStatsProvider} - summary, err := provider.Get(ctx, true) + summary, err := provider.Get(true) assert.NoError(err) assert.Equal(summary.Node.NodeName, "test-node") @@ -141,7 +139,6 @@ func TestSummaryProviderGetStats(t *testing.T) { } func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) { - ctx := context.Background() assert := assert.New(t) podStats := []statsapi.PodStats{ @@ -168,7 +165,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) { mockStatsProvider.EXPECT().GetNode().Return(node, nil) mockStatsProvider.EXPECT().GetNodeConfig().Return(nodeConfig) mockStatsProvider.EXPECT().GetPodCgroupRoot().Return(cgroupRoot) - mockStatsProvider.EXPECT().ListPodCPUAndMemoryStats(ctx).Return(podStats, nil) + mockStatsProvider.EXPECT().ListPodCPUAndMemoryStats().Return(podStats, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/", false).Return(cgroupStatsMap["/"].cs, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/runtime", false).Return(cgroupStatsMap["/runtime"].cs, nil) mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/misc", false).Return(cgroupStatsMap["/misc"].cs, nil) @@ -176,7 +173,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) { mockStatsProvider.EXPECT().GetCgroupCPUAndMemoryStats("/kubepods", false).Return(cgroupStatsMap["/pods"].cs, nil) provider := NewSummaryProvider(mockStatsProvider) - summary, err := provider.GetCPUAndMemoryStats(ctx) + summary, err := provider.GetCPUAndMemoryStats() assert.NoError(err) assert.Equal(summary.Node.NodeName, "test-node") diff --git a/pkg/kubelet/server/stats/testing/mock_stats_provider.go b/pkg/kubelet/server/stats/testing/mock_stats_provider.go index 2dc4ab58c4f..2f97bc009fc 100644 --- a/pkg/kubelet/server/stats/testing/mock_stats_provider.go +++ b/pkg/kubelet/server/stats/testing/mock_stats_provider.go @@ -21,7 +21,6 @@ limitations under the License. 
package testing import ( - context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" @@ -89,18 +88,18 @@ func (mr *MockProviderMockRecorder) GetCgroupStats(cgroupName, updateStats inter } // GetContainerInfo mocks base method. -func (m *MockProvider) GetContainerInfo(ctx context.Context, podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) { +func (m *MockProvider) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *v1.ContainerInfoRequest) (*v1.ContainerInfo, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetContainerInfo", ctx, podFullName, uid, containerName, req) + ret := m.ctrl.Call(m, "GetContainerInfo", podFullName, uid, containerName, req) ret0, _ := ret[0].(*v1.ContainerInfo) ret1, _ := ret[1].(error) return ret0, ret1 } // GetContainerInfo indicates an expected call of GetContainerInfo. -func (mr *MockProviderMockRecorder) GetContainerInfo(ctx, podFullName, uid, containerName, req interface{}) *gomock.Call { +func (mr *MockProviderMockRecorder) GetContainerInfo(podFullName, uid, containerName, req interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInfo", reflect.TypeOf((*MockProvider)(nil).GetContainerInfo), ctx, podFullName, uid, containerName, req) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContainerInfo", reflect.TypeOf((*MockProvider)(nil).GetContainerInfo), podFullName, uid, containerName, req) } // GetNode mocks base method. @@ -221,18 +220,18 @@ func (mr *MockProviderMockRecorder) GetRequestedContainersInfo(containerName, op } // ImageFsStats mocks base method. -func (m *MockProvider) ImageFsStats(ctx context.Context) (*v1alpha1.FsStats, error) { +func (m *MockProvider) ImageFsStats() (*v1alpha1.FsStats, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ImageFsStats", ctx) + ret := m.ctrl.Call(m, "ImageFsStats") ret0, _ := ret[0].(*v1alpha1.FsStats) ret1, _ := ret[1].(error) return ret0, ret1 } // ImageFsStats indicates an expected call of ImageFsStats. -func (mr *MockProviderMockRecorder) ImageFsStats(ctx interface{}) *gomock.Call { +func (mr *MockProviderMockRecorder) ImageFsStats() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsStats", reflect.TypeOf((*MockProvider)(nil).ImageFsStats), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ImageFsStats", reflect.TypeOf((*MockProvider)(nil).ImageFsStats)) } // ListBlockVolumesForPod mocks base method. @@ -251,48 +250,48 @@ func (mr *MockProviderMockRecorder) ListBlockVolumesForPod(podUID interface{}) * } // ListPodCPUAndMemoryStats mocks base method. -func (m *MockProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]v1alpha1.PodStats, error) { +func (m *MockProvider) ListPodCPUAndMemoryStats() ([]v1alpha1.PodStats, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPodCPUAndMemoryStats", ctx) + ret := m.ctrl.Call(m, "ListPodCPUAndMemoryStats") ret0, _ := ret[0].([]v1alpha1.PodStats) ret1, _ := ret[1].(error) return ret0, ret1 } // ListPodCPUAndMemoryStats indicates an expected call of ListPodCPUAndMemoryStats. 
-func (mr *MockProviderMockRecorder) ListPodCPUAndMemoryStats(ctx interface{}) *gomock.Call { +func (mr *MockProviderMockRecorder) ListPodCPUAndMemoryStats() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodCPUAndMemoryStats", reflect.TypeOf((*MockProvider)(nil).ListPodCPUAndMemoryStats), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodCPUAndMemoryStats", reflect.TypeOf((*MockProvider)(nil).ListPodCPUAndMemoryStats)) } // ListPodStats mocks base method. -func (m *MockProvider) ListPodStats(ctx context.Context) ([]v1alpha1.PodStats, error) { +func (m *MockProvider) ListPodStats() ([]v1alpha1.PodStats, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPodStats", ctx) + ret := m.ctrl.Call(m, "ListPodStats") ret0, _ := ret[0].([]v1alpha1.PodStats) ret1, _ := ret[1].(error) return ret0, ret1 } // ListPodStats indicates an expected call of ListPodStats. -func (mr *MockProviderMockRecorder) ListPodStats(ctx interface{}) *gomock.Call { +func (mr *MockProviderMockRecorder) ListPodStats() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStats", reflect.TypeOf((*MockProvider)(nil).ListPodStats), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStats", reflect.TypeOf((*MockProvider)(nil).ListPodStats)) } // ListPodStatsAndUpdateCPUNanoCoreUsage mocks base method. -func (m *MockProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]v1alpha1.PodStats, error) { +func (m *MockProvider) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]v1alpha1.PodStats, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListPodStatsAndUpdateCPUNanoCoreUsage", ctx) + ret := m.ctrl.Call(m, "ListPodStatsAndUpdateCPUNanoCoreUsage") ret0, _ := ret[0].([]v1alpha1.PodStats) ret1, _ := ret[1].(error) return ret0, ret1 } // ListPodStatsAndUpdateCPUNanoCoreUsage indicates an expected call of ListPodStatsAndUpdateCPUNanoCoreUsage. -func (mr *MockProviderMockRecorder) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx interface{}) *gomock.Call { +func (mr *MockProviderMockRecorder) ListPodStatsAndUpdateCPUNanoCoreUsage() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStatsAndUpdateCPUNanoCoreUsage", reflect.TypeOf((*MockProvider)(nil).ListPodStatsAndUpdateCPUNanoCoreUsage), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPodStatsAndUpdateCPUNanoCoreUsage", reflect.TypeOf((*MockProvider)(nil).ListPodStatsAndUpdateCPUNanoCoreUsage)) } // ListVolumesForPod mocks base method. diff --git a/pkg/kubelet/server/stats/testing/mock_summary_provider.go b/pkg/kubelet/server/stats/testing/mock_summary_provider.go index e4e22200aab..ab7131fc37d 100644 --- a/pkg/kubelet/server/stats/testing/mock_summary_provider.go +++ b/pkg/kubelet/server/stats/testing/mock_summary_provider.go @@ -21,7 +21,6 @@ limitations under the License. package testing import ( - context "context" reflect "reflect" gomock "github.com/golang/mock/gomock" @@ -52,31 +51,31 @@ func (m *MockSummaryProvider) EXPECT() *MockSummaryProviderMockRecorder { } // Get mocks base method. 
-func (m *MockSummaryProvider) Get(ctx context.Context, updateStats bool) (*v1alpha1.Summary, error) { +func (m *MockSummaryProvider) Get(updateStats bool) (*v1alpha1.Summary, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Get", ctx, updateStats) + ret := m.ctrl.Call(m, "Get", updateStats) ret0, _ := ret[0].(*v1alpha1.Summary) ret1, _ := ret[1].(error) return ret0, ret1 } // Get indicates an expected call of Get. -func (mr *MockSummaryProviderMockRecorder) Get(ctx, updateStats interface{}) *gomock.Call { +func (mr *MockSummaryProviderMockRecorder) Get(updateStats interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSummaryProvider)(nil).Get), ctx, updateStats) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSummaryProvider)(nil).Get), updateStats) } // GetCPUAndMemoryStats mocks base method. -func (m *MockSummaryProvider) GetCPUAndMemoryStats(ctx context.Context) (*v1alpha1.Summary, error) { +func (m *MockSummaryProvider) GetCPUAndMemoryStats() (*v1alpha1.Summary, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetCPUAndMemoryStats", ctx) + ret := m.ctrl.Call(m, "GetCPUAndMemoryStats") ret0, _ := ret[0].(*v1alpha1.Summary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetCPUAndMemoryStats indicates an expected call of GetCPUAndMemoryStats. -func (mr *MockSummaryProviderMockRecorder) GetCPUAndMemoryStats(ctx interface{}) *gomock.Call { +func (mr *MockSummaryProviderMockRecorder) GetCPUAndMemoryStats() *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCPUAndMemoryStats", reflect.TypeOf((*MockSummaryProvider)(nil).GetCPUAndMemoryStats), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCPUAndMemoryStats", reflect.TypeOf((*MockSummaryProvider)(nil).GetCPUAndMemoryStats)) } diff --git a/pkg/kubelet/stats/cadvisor_stats_provider.go b/pkg/kubelet/stats/cadvisor_stats_provider.go index a896610c987..f6a8680a39f 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider.go @@ -17,7 +17,6 @@ limitations under the License. package stats import ( - "context" "fmt" "path" "sort" @@ -76,7 +75,7 @@ func newCadvisorStatsProvider( } // ListPodStats returns the stats of all the pod-managed containers. -func (p *cadvisorStatsProvider) ListPodStats(_ context.Context) ([]statsapi.PodStats, error) { +func (p *cadvisorStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // Gets node root filesystem information and image filesystem stats, which // will be used to populate the available and capacity bytes/inodes in // container stats. @@ -170,12 +169,12 @@ func (p *cadvisorStatsProvider) ListPodStats(_ context.Context) ([]statsapi.PodS // the containers and returns the stats for all the pod-managed containers. // For cadvisor, cpu nano core usages are pre-computed and cached, so this // function simply calls ListPodStats. -func (p *cadvisorStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) { - return p.ListPodStats(ctx) +func (p *cadvisorStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { + return p.ListPodStats() } // ListPodCPUAndMemoryStats returns the cpu and memory stats of all the pod-managed containers. 
-func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats(_ context.Context) ([]statsapi.PodStats, error) { +func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { infos, err := getCadvisorContainerInfo(p.cadvisor) if err != nil { return nil, fmt.Errorf("failed to get container info from cadvisor: %v", err) @@ -235,12 +234,12 @@ func (p *cadvisorStatsProvider) ListPodCPUAndMemoryStats(_ context.Context) ([]s } // ImageFsStats returns the stats of the filesystem for storing images. -func (p *cadvisorStatsProvider) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) { +func (p *cadvisorStatsProvider) ImageFsStats() (*statsapi.FsStats, error) { imageFsInfo, err := p.cadvisor.ImagesFsInfo() if err != nil { return nil, fmt.Errorf("failed to get imageFs info: %v", err) } - imageStats, err := p.imageService.ImageStats(ctx) + imageStats, err := p.imageService.ImageStats() if err != nil || imageStats == nil { return nil, fmt.Errorf("failed to get image stats: %v", err) } @@ -264,7 +263,7 @@ func (p *cadvisorStatsProvider) ImageFsStats(ctx context.Context) (*statsapi.FsS // ImageFsDevice returns name of the device where the image filesystem locates, // e.g. /dev/sda1. -func (p *cadvisorStatsProvider) ImageFsDevice(_ context.Context) (string, error) { +func (p *cadvisorStatsProvider) ImageFsDevice() (string, error) { imageFsInfo, err := p.cadvisor.ImagesFsInfo() if err != nil { return "", err diff --git a/pkg/kubelet/stats/cadvisor_stats_provider_test.go b/pkg/kubelet/stats/cadvisor_stats_provider_test.go index 25decaafe86..7e44a4c587f 100644 --- a/pkg/kubelet/stats/cadvisor_stats_provider_test.go +++ b/pkg/kubelet/stats/cadvisor_stats_provider_test.go @@ -17,7 +17,6 @@ limitations under the License. package stats import ( - "context" "testing" "github.com/golang/mock/gomock" @@ -100,7 +99,6 @@ func TestFilterTerminatedContainerInfoAndAssembleByPodCgroupKey(t *testing.T) { } func TestCadvisorListPodStats(t *testing.T) { - ctx := context.Background() const ( namespace0 = "test0" namespace2 = "test2" @@ -240,7 +238,7 @@ func TestCadvisorListPodStats(t *testing.T) { mockCadvisor.EXPECT().ImagesFsInfo().Return(imagefs, nil) mockRuntime := containertest.NewMockRuntime(mockCtrl) - mockRuntime.EXPECT().ImageStats(ctx).Return(&kubecontainer.ImageStats{TotalStorageBytes: 123}, nil).AnyTimes() + mockRuntime.EXPECT().ImageStats().Return(&kubecontainer.ImageStats{TotalStorageBytes: 123}, nil).AnyTimes() ephemeralVolumes := []statsapi.VolumeStats{getPodVolumeStats(seedEphemeralVolume1, "ephemeralVolume1"), getPodVolumeStats(seedEphemeralVolume2, "ephemeralVolume2")} @@ -263,7 +261,7 @@ func TestCadvisorListPodStats(t *testing.T) { resourceAnalyzer := &fakeResourceAnalyzer{podVolumeStats: volumeStats} p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, mockRuntime, mockStatus, NewFakeHostStatsProvider()) - pods, err := p.ListPodStats(ctx) + pods, err := p.ListPodStats() assert.NoError(t, err) assert.Equal(t, 4, len(pods)) @@ -337,7 +335,6 @@ func TestCadvisorListPodStats(t *testing.T) { } func TestCadvisorListPodCPUAndMemoryStats(t *testing.T) { - ctx := context.Background() const ( namespace0 = "test0" namespace2 = "test2" @@ -431,7 +428,7 @@ func TestCadvisorListPodCPUAndMemoryStats(t *testing.T) { resourceAnalyzer := &fakeResourceAnalyzer{podVolumeStats: volumeStats} p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, nil, nil, NewFakeHostStatsProvider()) - pods, err := p.ListPodCPUAndMemoryStats(ctx) + pods, err := 
p.ListPodCPUAndMemoryStats() assert.NoError(t, err) assert.Equal(t, 3, len(pods)) @@ -503,7 +500,6 @@ func TestCadvisorListPodCPUAndMemoryStats(t *testing.T) { } func TestCadvisorImagesFsStats(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() var ( @@ -517,10 +513,10 @@ func TestCadvisorImagesFsStats(t *testing.T) { ) mockCadvisor.EXPECT().ImagesFsInfo().Return(imageFsInfo, nil) - mockRuntime.EXPECT().ImageStats(ctx).Return(imageStats, nil) + mockRuntime.EXPECT().ImageStats().Return(imageStats, nil) provider := newCadvisorStatsProvider(mockCadvisor, &fakeResourceAnalyzer{}, mockRuntime, nil, NewFakeHostStatsProvider()) - stats, err := provider.ImageFsStats(ctx) + stats, err := provider.ImageFsStats() assert.NoError(err) assert.Equal(imageFsInfo.Timestamp, stats.Time.Time) @@ -533,7 +529,6 @@ func TestCadvisorImagesFsStats(t *testing.T) { } func TestCadvisorListPodStatsWhenContainerLogFound(t *testing.T) { - ctx := context.Background() const ( namespace0 = "test0" ) @@ -618,7 +613,7 @@ func TestCadvisorListPodStatsWhenContainerLogFound(t *testing.T) { mockCadvisor.EXPECT().ImagesFsInfo().Return(imagefs, nil) mockRuntime := containertest.NewMockRuntime(mockCtrl) - mockRuntime.EXPECT().ImageStats(ctx).Return(&kubecontainer.ImageStats{TotalStorageBytes: 123}, nil).AnyTimes() + mockRuntime.EXPECT().ImageStats().Return(&kubecontainer.ImageStats{TotalStorageBytes: 123}, nil).AnyTimes() volumeStats := serverstats.PodVolumeStats{} p0Time := metav1.Now() @@ -628,7 +623,7 @@ func TestCadvisorListPodStatsWhenContainerLogFound(t *testing.T) { resourceAnalyzer := &fakeResourceAnalyzer{podVolumeStats: volumeStats} p := NewCadvisorStatsProvider(mockCadvisor, resourceAnalyzer, nil, nil, mockRuntime, mockStatus, NewFakeHostStatsProviderWithData(fakeStats, fakeOS)) - pods, err := p.ListPodStats(ctx) + pods, err := p.ListPodStats() assert.NoError(t, err) assert.Equal(t, 1, len(pods)) diff --git a/pkg/kubelet/stats/cri_stats_provider.go b/pkg/kubelet/stats/cri_stats_provider.go index ef48aec3e5d..c220795fec6 100644 --- a/pkg/kubelet/stats/cri_stats_provider.go +++ b/pkg/kubelet/stats/cri_stats_provider.go @@ -17,7 +17,6 @@ limitations under the License. package stats import ( - "context" "errors" "fmt" "path" @@ -103,9 +102,9 @@ func newCRIStatsProvider( } // ListPodStats returns the stats of all the pod-managed containers. -func (p *criStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) { +func (p *criStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { // Don't update CPU nano core usage. - return p.listPodStats(ctx, false) + return p.listPodStats(false) } // ListPodStatsAndUpdateCPUNanoCoreUsage updates the cpu nano core usage for @@ -118,12 +117,12 @@ func (p *criStatsProvider) ListPodStats(ctx context.Context) ([]statsapi.PodStat // vary and the usage could be incoherent (e.g., spiky). If no caller calls // this function, the cpu usage will stay nil. Right now, eviction manager is // the only caller, and it calls this function every 10s. -func (p *criStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) { +func (p *criStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { // Update CPU nano core usage. 
- return p.listPodStats(ctx, true) + return p.listPodStats(true) } -func (p *criStatsProvider) listPodStats(ctx context.Context, updateCPUNanoCoreUsage bool) ([]statsapi.PodStats, error) { +func (p *criStatsProvider) listPodStats(updateCPUNanoCoreUsage bool) ([]statsapi.PodStats, error) { // Gets node root filesystem information, which will be used to populate // the available and capacity bytes/inodes in container stats. rootFsInfo, err := p.cadvisor.RootFsInfo() @@ -131,13 +130,13 @@ func (p *criStatsProvider) listPodStats(ctx context.Context, updateCPUNanoCoreUs return nil, fmt.Errorf("failed to get rootFs info: %v", err) } - containerMap, podSandboxMap, err := p.getPodAndContainerMaps(ctx) + containerMap, podSandboxMap, err := p.getPodAndContainerMaps() if err != nil { return nil, fmt.Errorf("failed to get pod or container map: %v", err) } if p.podAndContainerStatsFromCRI { - _, err := p.listPodStatsStrictlyFromCRI(ctx, updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo) + _, err := p.listPodStatsStrictlyFromCRI(updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo) if err != nil { s, ok := status.FromError(err) // Legitimate failure, rather than the CRI implementation does not support ListPodSandboxStats. @@ -150,10 +149,10 @@ func (p *criStatsProvider) listPodStats(ctx context.Context, updateCPUNanoCoreUs ) } } - return p.listPodStatsPartiallyFromCRI(ctx, updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo) + return p.listPodStatsPartiallyFromCRI(updateCPUNanoCoreUsage, containerMap, podSandboxMap, &rootFsInfo) } -func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, updateCPUNanoCoreUsage bool, containerMap map[string]*runtimeapi.Container, podSandboxMap map[string]*runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo) ([]statsapi.PodStats, error) { +func (p *criStatsProvider) listPodStatsPartiallyFromCRI(updateCPUNanoCoreUsage bool, containerMap map[string]*runtimeapi.Container, podSandboxMap map[string]*runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo) ([]statsapi.PodStats, error) { // fsIDtoInfo is a map from filesystem id to its stats. This will be used // as a cache to avoid querying cAdvisor for the filesystem stats with the // same filesystem id many times. @@ -162,7 +161,7 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd // sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats. 
sandboxIDToPodStats := make(map[string]*statsapi.PodStats) - resp, err := p.runtimeService.ListContainerStats(ctx, &runtimeapi.ContainerStatsFilter{}) + resp, err := p.runtimeService.ListContainerStats(&runtimeapi.ContainerStatsFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all container stats: %v", err) } @@ -227,8 +226,8 @@ func (p *criStatsProvider) listPodStatsPartiallyFromCRI(ctx context.Context, upd return result, nil } -func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, updateCPUNanoCoreUsage bool, containerMap map[string]*runtimeapi.Container, podSandboxMap map[string]*runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo) ([]statsapi.PodStats, error) { - criSandboxStats, err := p.runtimeService.ListPodSandboxStats(ctx, &runtimeapi.PodSandboxStatsFilter{}) +func (p *criStatsProvider) listPodStatsStrictlyFromCRI(updateCPUNanoCoreUsage bool, containerMap map[string]*runtimeapi.Container, podSandboxMap map[string]*runtimeapi.PodSandbox, rootFsInfo *cadvisorapiv2.FsInfo) ([]statsapi.PodStats, error) { + criSandboxStats, err := p.runtimeService.ListPodSandboxStats(&runtimeapi.PodSandboxStatsFilter{}) if err != nil { return nil, err } @@ -265,17 +264,17 @@ func (p *criStatsProvider) listPodStatsStrictlyFromCRI(ctx context.Context, upda } // ListPodCPUAndMemoryStats returns the CPU and Memory stats of all the pod-managed containers. -func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) { +func (p *criStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { // sandboxIDToPodStats is a temporary map from sandbox ID to its pod stats. sandboxIDToPodStats := make(map[string]*statsapi.PodStats) - containerMap, podSandboxMap, err := p.getPodAndContainerMaps(ctx) + containerMap, podSandboxMap, err := p.getPodAndContainerMaps() if err != nil { return nil, fmt.Errorf("failed to get pod or container map: %v", err) } result := make([]statsapi.PodStats, 0, len(podSandboxMap)) if p.podAndContainerStatsFromCRI { - criSandboxStats, err := p.runtimeService.ListPodSandboxStats(ctx, &runtimeapi.PodSandboxStatsFilter{}) + criSandboxStats, err := p.runtimeService.ListPodSandboxStats(&runtimeapi.PodSandboxStatsFilter{}) // Call succeeded if err == nil { for _, criSandboxStat := range criSandboxStats { @@ -302,7 +301,7 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]stat ) } - resp, err := p.runtimeService.ListContainerStats(ctx, &runtimeapi.ContainerStatsFilter{}) + resp, err := p.runtimeService.ListContainerStats(&runtimeapi.ContainerStatsFilter{}) if err != nil { return nil, fmt.Errorf("failed to list all container stats: %v", err) } @@ -357,15 +356,15 @@ func (p *criStatsProvider) ListPodCPUAndMemoryStats(ctx context.Context) ([]stat return result, nil } -func (p *criStatsProvider) getPodAndContainerMaps(ctx context.Context) (map[string]*runtimeapi.Container, map[string]*runtimeapi.PodSandbox, error) { - containers, err := p.runtimeService.ListContainers(ctx, &runtimeapi.ContainerFilter{}) +func (p *criStatsProvider) getPodAndContainerMaps() (map[string]*runtimeapi.Container, map[string]*runtimeapi.PodSandbox, error) { + containers, err := p.runtimeService.ListContainers(&runtimeapi.ContainerFilter{}) if err != nil { return nil, nil, fmt.Errorf("failed to list all containers: %v", err) } // Creates pod sandbox map between the pod sandbox ID and the PodSandbox object. 
podSandboxMap := make(map[string]*runtimeapi.PodSandbox) - podSandboxes, err := p.runtimeService.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{}) + podSandboxes, err := p.runtimeService.ListPodSandbox(&runtimeapi.PodSandboxFilter{}) if err != nil { return nil, nil, fmt.Errorf("failed to list all pod sandboxes: %v", err) } @@ -384,8 +383,8 @@ func (p *criStatsProvider) getPodAndContainerMaps(ctx context.Context) (map[stri } // ImageFsStats returns the stats of the image filesystem. -func (p *criStatsProvider) ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) { - resp, err := p.imageService.ImageFsInfo(ctx) +func (p *criStatsProvider) ImageFsStats() (*statsapi.FsStats, error) { + resp, err := p.imageService.ImageFsInfo() if err != nil { return nil, err } @@ -421,8 +420,8 @@ func (p *criStatsProvider) ImageFsStats(ctx context.Context) (*statsapi.FsStats, // ImageFsDevice returns name of the device where the image filesystem locates, // e.g. /dev/sda1. -func (p *criStatsProvider) ImageFsDevice(ctx context.Context) (string, error) { - resp, err := p.imageService.ImageFsInfo(ctx) +func (p *criStatsProvider) ImageFsDevice() (string, error) { + resp, err := p.imageService.ImageFsInfo() if err != nil { return "", err } diff --git a/pkg/kubelet/stats/cri_stats_provider_test.go b/pkg/kubelet/stats/cri_stats_provider_test.go index 27bce9d628a..5fcc2c4429c 100644 --- a/pkg/kubelet/stats/cri_stats_provider_test.go +++ b/pkg/kubelet/stats/cri_stats_provider_test.go @@ -17,7 +17,6 @@ limitations under the License. package stats import ( - "context" "math/rand" "os" "path/filepath" @@ -85,7 +84,6 @@ const ( ) func TestCRIListPodStats(t *testing.T) { - ctx := context.Background() var ( imageFsMountpoint = "/test/mount/point" unknownMountpoint = "/unknown/mount/point" @@ -240,7 +238,7 @@ func TestCRIListPodStats(t *testing.T) { false, ) - stats, err := provider.ListPodStats(ctx) + stats, err := provider.ListPodStats() assert := assert.New(t) assert.NoError(err) assert.Equal(4, len(stats)) @@ -325,7 +323,6 @@ func TestCRIListPodStats(t *testing.T) { } func TestCRIListPodCPUAndMemoryStats(t *testing.T) { - ctx := context.Background() var ( imageFsMountpoint = "/test/mount/point" @@ -438,7 +435,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { false, ) - stats, err := provider.ListPodCPUAndMemoryStats(ctx) + stats, err := provider.ListPodCPUAndMemoryStats() assert := assert.New(t) assert.NoError(err) assert.Equal(5, len(stats)) @@ -535,7 +532,6 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { } func TestCRIImagesFsStats(t *testing.T) { - ctx := context.Background() var ( imageFsMountpoint = "/test/mount/point" imageFsInfo = getTestFsInfo(2000) @@ -569,7 +565,7 @@ func TestCRIImagesFsStats(t *testing.T) { false, ) - stats, err := provider.ImageFsStats(ctx) + stats, err := provider.ImageFsStats() assert := assert.New(t) assert.NoError(err) diff --git a/pkg/kubelet/stats/provider.go b/pkg/kubelet/stats/provider.go index 5a8bb4abfa3..3e8a1d38de5 100644 --- a/pkg/kubelet/stats/provider.go +++ b/pkg/kubelet/stats/provider.go @@ -17,7 +17,6 @@ limitations under the License. package stats import ( - "context" "fmt" cadvisorapiv1 "github.com/google/cadvisor/info/v1" @@ -91,11 +90,11 @@ type Provider struct { // containerStatsProvider is an interface that provides the stats of the // containers managed by pods. 
type containerStatsProvider interface { - ListPodStats(ctx context.Context) ([]statsapi.PodStats, error) - ListPodStatsAndUpdateCPUNanoCoreUsage(ctx context.Context) ([]statsapi.PodStats, error) - ListPodCPUAndMemoryStats(ctx context.Context) ([]statsapi.PodStats, error) - ImageFsStats(ctx context.Context) (*statsapi.FsStats, error) - ImageFsDevice(ctx context.Context) (string, error) + ListPodStats() ([]statsapi.PodStats, error) + ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) + ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) + ImageFsStats() (*statsapi.FsStats, error) + ImageFsDevice() (string, error) } type rlimitStatsProvider interface { @@ -164,12 +163,12 @@ func (p *Provider) RootFsStats() (*statsapi.FsStats, error) { } // GetContainerInfo returns stats (from cAdvisor) for a container. -func (p *Provider) GetContainerInfo(ctx context.Context, podFullName string, podUID types.UID, containerName string, req *cadvisorapiv1.ContainerInfoRequest) (*cadvisorapiv1.ContainerInfo, error) { +func (p *Provider) GetContainerInfo(podFullName string, podUID types.UID, containerName string, req *cadvisorapiv1.ContainerInfoRequest) (*cadvisorapiv1.ContainerInfo, error) { // Resolve and type convert back again. // We need the static pod UID but the kubecontainer API works with types.UID. podUID = types.UID(p.podManager.TranslatePodUID(podUID)) - pods, err := p.runtimeCache.GetPods(ctx) + pods, err := p.runtimeCache.GetPods() if err != nil { return nil, err } @@ -202,8 +201,8 @@ func (p *Provider) GetRawContainerInfo(containerName string, req *cadvisorapiv1. } // HasDedicatedImageFs returns true if a dedicated image filesystem exists for storing images. -func (p *Provider) HasDedicatedImageFs(ctx context.Context) (bool, error) { - device, err := p.containerStatsProvider.ImageFsDevice(ctx) +func (p *Provider) HasDedicatedImageFs() (bool, error) { + device, err := p.containerStatsProvider.ImageFsDevice() if err != nil { return false, err } diff --git a/pkg/kubelet/stats/provider_test.go b/pkg/kubelet/stats/provider_test.go index 4c56a3ec48d..fe6a231b621 100644 --- a/pkg/kubelet/stats/provider_test.go +++ b/pkg/kubelet/stats/provider_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package stats import ( - "context" "fmt" "testing" "time" @@ -178,7 +177,6 @@ func TestRootFsStats(t *testing.T) { } func TestGetContainerInfo(t *testing.T) { - ctx := context.Background() cadvisorAPIFailure := fmt.Errorf("cAdvisor failure") runtimeError := fmt.Errorf("List containers error") tests := []struct { @@ -338,13 +336,13 @@ func TestGetContainerInfo(t *testing.T) { ) mockPodManager.EXPECT().TranslatePodUID(tc.requestedPodUID).Return(kubetypes.ResolvedPodUID(tc.requestedPodUID)) - mockRuntimeCache.EXPECT().GetPods(ctx).Return(tc.podList, tc.runtimeError) + mockRuntimeCache.EXPECT().GetPods().Return(tc.podList, tc.runtimeError) if tc.expectDockerContainerCall { mockCadvisor.EXPECT().DockerContainer(tc.containerID, cadvisorReq).Return(tc.cadvisorContainerInfo, tc.mockError) } provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{}) - stats, err := provider.GetContainerInfo(ctx, tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq) + stats, err := provider.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq) assert.Equal(t, tc.expectedError, err) if tc.expectStats { @@ -412,7 +410,6 @@ func TestGetRawContainerInfoSubcontainers(t *testing.T) { } func TestHasDedicatedImageFs(t *testing.T) { - ctx := context.Background() mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() @@ -443,7 +440,7 @@ func TestHasDedicatedImageFs(t *testing.T) { provider := newStatsProvider(mockCadvisor, mockPodManager, mockRuntimeCache, fakeContainerStatsProvider{ device: test.imagefsDevice, }) - dedicated, err := provider.HasDedicatedImageFs(ctx) + dedicated, err := provider.HasDedicatedImageFs() assert.NoError(t, err) assert.Equal(t, test.dedicated, dedicated) } @@ -732,11 +729,9 @@ type fakeResourceAnalyzer struct { podVolumeStats serverstats.PodVolumeStats } -func (o *fakeResourceAnalyzer) Start() {} -func (o *fakeResourceAnalyzer) Get(context.Context, bool) (*statsapi.Summary, error) { return nil, nil } -func (o *fakeResourceAnalyzer) GetCPUAndMemoryStats(context.Context) (*statsapi.Summary, error) { - return nil, nil -} +func (o *fakeResourceAnalyzer) Start() {} +func (o *fakeResourceAnalyzer) Get(bool) (*statsapi.Summary, error) { return nil, nil } +func (o *fakeResourceAnalyzer) GetCPUAndMemoryStats() (*statsapi.Summary, error) { return nil, nil } func (o *fakeResourceAnalyzer) GetPodVolumeStats(uid types.UID) (serverstats.PodVolumeStats, bool) { return o.podVolumeStats, true } @@ -745,22 +740,22 @@ type fakeContainerStatsProvider struct { device string } -func (p fakeContainerStatsProvider) ListPodStats(context.Context) ([]statsapi.PodStats, error) { +func (p fakeContainerStatsProvider) ListPodStats() ([]statsapi.PodStats, error) { return nil, fmt.Errorf("not implemented") } -func (p fakeContainerStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage(context.Context) ([]statsapi.PodStats, error) { +func (p fakeContainerStatsProvider) ListPodStatsAndUpdateCPUNanoCoreUsage() ([]statsapi.PodStats, error) { return nil, fmt.Errorf("not implemented") } -func (p fakeContainerStatsProvider) ListPodCPUAndMemoryStats(context.Context) ([]statsapi.PodStats, error) { +func (p fakeContainerStatsProvider) ListPodCPUAndMemoryStats() ([]statsapi.PodStats, error) { return nil, fmt.Errorf("not implemented") } -func (p fakeContainerStatsProvider) ImageFsStats(context.Context) (*statsapi.FsStats, error) { +func (p fakeContainerStatsProvider) ImageFsStats() (*statsapi.FsStats, error) { 
return nil, fmt.Errorf("not implemented") } -func (p fakeContainerStatsProvider) ImageFsDevice(context.Context) (string, error) { +func (p fakeContainerStatsProvider) ImageFsDevice() (string, error) { return p.device, nil } diff --git a/staging/src/k8s.io/cri-api/pkg/apis/services.go b/staging/src/k8s.io/cri-api/pkg/apis/services.go index 9aff39ed28e..55f631738b7 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/services.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/services.go @@ -17,7 +17,6 @@ limitations under the License. package cri import ( - "context" "time" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" @@ -26,40 +25,40 @@ import ( // RuntimeVersioner contains methods for runtime name, version and API version. type RuntimeVersioner interface { // Version returns the runtime name, runtime version and runtime API version - Version(ctx context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) + Version(apiVersion string) (*runtimeapi.VersionResponse, error) } // ContainerManager contains methods to manipulate containers managed by a // container runtime. The methods are thread-safe. type ContainerManager interface { // CreateContainer creates a new container in specified PodSandbox. - CreateContainer(ctx context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) // StartContainer starts the container. - StartContainer(ctx context.Context, containerID string) error + StartContainer(containerID string) error // StopContainer stops a running container with a grace period (i.e., timeout). - StopContainer(ctx context.Context, containerID string, timeout int64) error + StopContainer(containerID string, timeout int64) error // RemoveContainer removes the container. - RemoveContainer(ctx context.Context, containerID string) error + RemoveContainer(containerID string) error // ListContainers lists all containers by filters. - ListContainers(ctx context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) + ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) // ContainerStatus returns the status of the container. - ContainerStatus(ctx context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) + ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) // UpdateContainerResources updates ContainerConfig of the container synchronously. // If runtime fails to transactionally update the requested resources, an error is returned. - UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) error + UpdateContainerResources(containerID string, resources *runtimeapi.ContainerResources) error // ExecSync executes a command in the container, and returns the stdout output. // If command exits with a non-zero exit code, an error is returned. - ExecSync(ctx context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) + ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) // Exec prepares a streaming endpoint to execute a command in the container, and returns the address. 
- Exec(context.Context, *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) + Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) // Attach prepares a streaming endpoint to attach to a running container, and returns the address. - Attach(ctx context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) + Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) // ReopenContainerLog asks runtime to reopen the stdout/stderr log file // for the container. If it returns error, new container log file MUST NOT // be created. - ReopenContainerLog(ctx context.Context, ContainerID string) error + ReopenContainerLog(ContainerID string) error // CheckpointContainer checkpoints a container - CheckpointContainer(ctx context.Context, options *runtimeapi.CheckpointContainerRequest) error + CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error // GetContainerEvents gets container events from the CRI runtime GetContainerEvents(containerEventsCh chan *runtimeapi.ContainerEventResponse) error } @@ -69,19 +68,19 @@ type ContainerManager interface { type PodSandboxManager interface { // RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure // the sandbox is in ready state. - RunPodSandbox(ctx context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) + RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) // StopPodSandbox stops the sandbox. If there are any running containers in the // sandbox, they should be force terminated. - StopPodSandbox(pctx context.Context, odSandboxID string) error + StopPodSandbox(podSandboxID string) error // RemovePodSandbox removes the sandbox. If there are running containers in the // sandbox, they should be forcibly removed. - RemovePodSandbox(ctx context.Context, podSandboxID string) error + RemovePodSandbox(podSandboxID string) error // PodSandboxStatus returns the Status of the PodSandbox. - PodSandboxStatus(ctx context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) + PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) // ListPodSandbox returns a list of Sandbox. - ListPodSandbox(ctx context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) + ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) // PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address. - PortForward(context.Context, *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) + PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) } // ContainerStatsManager contains methods for retrieving the container @@ -89,14 +88,14 @@ type PodSandboxManager interface { type ContainerStatsManager interface { // ContainerStats returns stats of the container. If the container does not // exist, the call returns an error. - ContainerStats(ctx context.Context, containerID string) (*runtimeapi.ContainerStats, error) + ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) // ListContainerStats returns stats of all running containers. - ListContainerStats(ctx context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) + ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) // PodSandboxStats returns stats of the pod. 
If the pod does not // exist, the call returns an error. - PodSandboxStats(ctx context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) + PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) // ListPodSandboxStats returns stats of all running pods. - ListPodSandboxStats(ctx context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) + ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) } // RuntimeService interface should be implemented by a container runtime. @@ -108,9 +107,9 @@ type RuntimeService interface { ContainerStatsManager // UpdateRuntimeConfig updates runtime configuration if specified - UpdateRuntimeConfig(ctx context.Context, runtimeConfig *runtimeapi.RuntimeConfig) error + UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) error // Status returns the status of the runtime. - Status(ctx context.Context, verbose bool) (*runtimeapi.StatusResponse, error) + Status(verbose bool) (*runtimeapi.StatusResponse, error) } // ImageManagerService interface should be implemented by a container image @@ -118,13 +117,13 @@ type RuntimeService interface { // The methods should be thread-safe. type ImageManagerService interface { // ListImages lists the existing images. - ListImages(ctx context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) + ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) // ImageStatus returns the status of the image. - ImageStatus(ctx context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) + ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) // PullImage pulls an image with the authentication config. - PullImage(ctx context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) + PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) // RemoveImage removes the image. - RemoveImage(ctx context.Context, image *runtimeapi.ImageSpec) error + RemoveImage(image *runtimeapi.ImageSpec) error // ImageFsInfo returns information of the filesystem that is used to store images. - ImageFsInfo(ctx context.Context) ([]*runtimeapi.FilesystemUsage, error) + ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) } diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go index 4c16581ba13..dc1bcaf97e5 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_image_service.go @@ -17,7 +17,6 @@ limitations under the License. package testing import ( - "context" "sync" "testing" @@ -132,7 +131,7 @@ func (r *FakeImageService) popError(f string) error { } // ListImages returns the list of images from FakeImageService or error if it was previously set. -func (r *FakeImageService) ListImages(_ context.Context, filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { +func (r *FakeImageService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) { r.Lock() defer r.Unlock() @@ -155,7 +154,7 @@ func (r *FakeImageService) ListImages(_ context.Context, filter *runtimeapi.Imag } // ImageStatus returns the status of the image from the FakeImageService. 
-func (r *FakeImageService) ImageStatus(_ context.Context, image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) { +func (r *FakeImageService) ImageStatus(image *runtimeapi.ImageSpec, verbose bool) (*runtimeapi.ImageStatusResponse, error) { r.Lock() defer r.Unlock() @@ -168,7 +167,7 @@ func (r *FakeImageService) ImageStatus(_ context.Context, image *runtimeapi.Imag } // PullImage emulate pulling the image from the FakeImageService. -func (r *FakeImageService) PullImage(_ context.Context, image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *FakeImageService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig, podSandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { r.Lock() defer r.Unlock() @@ -189,7 +188,7 @@ func (r *FakeImageService) PullImage(_ context.Context, image *runtimeapi.ImageS } // RemoveImage removes image from the FakeImageService. -func (r *FakeImageService) RemoveImage(_ context.Context, image *runtimeapi.ImageSpec) error { +func (r *FakeImageService) RemoveImage(image *runtimeapi.ImageSpec) error { r.Lock() defer r.Unlock() @@ -205,7 +204,7 @@ func (r *FakeImageService) RemoveImage(_ context.Context, image *runtimeapi.Imag } // ImageFsInfo returns information of the filesystem that is used to store images. -func (r *FakeImageService) ImageFsInfo(_ context.Context) ([]*runtimeapi.FilesystemUsage, error) { +func (r *FakeImageService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) { r.Lock() defer r.Unlock() diff --git a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go index fc18a372afd..635295e3d71 100644 --- a/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go +++ b/staging/src/k8s.io/cri-api/pkg/apis/testing/fake_runtime_service.go @@ -17,7 +17,6 @@ limitations under the License. package testing import ( - "context" "fmt" "reflect" "sync" @@ -163,7 +162,7 @@ func NewFakeRuntimeService() *FakeRuntimeService { } // Version returns version information from the FakeRuntimeService. -func (r *FakeRuntimeService) Version(_ context.Context, apiVersion string) (*runtimeapi.VersionResponse, error) { +func (r *FakeRuntimeService) Version(apiVersion string) (*runtimeapi.VersionResponse, error) { r.Lock() defer r.Unlock() @@ -181,7 +180,7 @@ func (r *FakeRuntimeService) Version(_ context.Context, apiVersion string) (*run } // Status returns runtime status of the FakeRuntimeService. -func (r *FakeRuntimeService) Status(_ context.Context, verbose bool) (*runtimeapi.StatusResponse, error) { +func (r *FakeRuntimeService) Status(verbose bool) (*runtimeapi.StatusResponse, error) { r.Lock() defer r.Unlock() @@ -194,7 +193,7 @@ func (r *FakeRuntimeService) Status(_ context.Context, verbose bool) (*runtimeap } // RunPodSandbox emulates the run of the pod sandbox in the FakeRuntimeService. -func (r *FakeRuntimeService) RunPodSandbox(_ context.Context, config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { +func (r *FakeRuntimeService) RunPodSandbox(config *runtimeapi.PodSandboxConfig, runtimeHandler string) (string, error) { r.Lock() defer r.Unlock() @@ -247,7 +246,7 @@ func (r *FakeRuntimeService) RunPodSandbox(_ context.Context, config *runtimeapi } // StopPodSandbox emulates the stop of pod sandbox in the FakeRuntimeService. 
-func (r *FakeRuntimeService) StopPodSandbox(_ context.Context, podSandboxID string) error { +func (r *FakeRuntimeService) StopPodSandbox(podSandboxID string) error { r.Lock() defer r.Unlock() @@ -266,7 +265,7 @@ func (r *FakeRuntimeService) StopPodSandbox(_ context.Context, podSandboxID stri } // RemovePodSandbox emulates removal of the pod sadbox in the FakeRuntimeService. -func (r *FakeRuntimeService) RemovePodSandbox(_ context.Context, podSandboxID string) error { +func (r *FakeRuntimeService) RemovePodSandbox(podSandboxID string) error { r.Lock() defer r.Unlock() @@ -282,7 +281,7 @@ func (r *FakeRuntimeService) RemovePodSandbox(_ context.Context, podSandboxID st } // PodSandboxStatus returns pod sandbox status from the FakeRuntimeService. -func (r *FakeRuntimeService) PodSandboxStatus(_ context.Context, podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { +func (r *FakeRuntimeService) PodSandboxStatus(podSandboxID string, verbose bool) (*runtimeapi.PodSandboxStatusResponse, error) { r.Lock() defer r.Unlock() @@ -301,7 +300,7 @@ func (r *FakeRuntimeService) PodSandboxStatus(_ context.Context, podSandboxID st } // ListPodSandbox returns the list of pod sandboxes in the FakeRuntimeService. -func (r *FakeRuntimeService) ListPodSandbox(_ context.Context, filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { +func (r *FakeRuntimeService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) { r.Lock() defer r.Unlock() @@ -339,7 +338,7 @@ func (r *FakeRuntimeService) ListPodSandbox(_ context.Context, filter *runtimeap } // PortForward emulates the set up of port forward in the FakeRuntimeService. -func (r *FakeRuntimeService) PortForward(context.Context, *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { +func (r *FakeRuntimeService) PortForward(*runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) { r.Lock() defer r.Unlock() @@ -352,7 +351,7 @@ func (r *FakeRuntimeService) PortForward(context.Context, *runtimeapi.PortForwar } // CreateContainer emulates container creation in the FakeRuntimeService. -func (r *FakeRuntimeService) CreateContainer(_ context.Context, podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { +func (r *FakeRuntimeService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) { r.Lock() defer r.Unlock() @@ -386,7 +385,7 @@ func (r *FakeRuntimeService) CreateContainer(_ context.Context, podSandboxID str } // StartContainer emulates start of a container in the FakeRuntimeService. -func (r *FakeRuntimeService) StartContainer(_ context.Context, containerID string) error { +func (r *FakeRuntimeService) StartContainer(containerID string) error { r.Lock() defer r.Unlock() @@ -408,7 +407,7 @@ func (r *FakeRuntimeService) StartContainer(_ context.Context, containerID strin } // StopContainer emulates stop of a container in the FakeRuntimeService. -func (r *FakeRuntimeService) StopContainer(_ context.Context, containerID string, timeout int64) error { +func (r *FakeRuntimeService) StopContainer(containerID string, timeout int64) error { r.Lock() defer r.Unlock() @@ -432,7 +431,7 @@ func (r *FakeRuntimeService) StopContainer(_ context.Context, containerID string } // RemoveContainer emulates remove of a container in the FakeRuntimeService. 
-func (r *FakeRuntimeService) RemoveContainer(_ context.Context, containerID string) error { +func (r *FakeRuntimeService) RemoveContainer(containerID string) error { r.Lock() defer r.Unlock() @@ -448,7 +447,7 @@ func (r *FakeRuntimeService) RemoveContainer(_ context.Context, containerID stri } // ListContainers returns the list of containers in the FakeRuntimeService. -func (r *FakeRuntimeService) ListContainers(_ context.Context, filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { +func (r *FakeRuntimeService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) { r.Lock() defer r.Unlock() @@ -491,7 +490,7 @@ func (r *FakeRuntimeService) ListContainers(_ context.Context, filter *runtimeap } // ContainerStatus returns the container status given the container ID in FakeRuntimeService. -func (r *FakeRuntimeService) ContainerStatus(_ context.Context, containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { +func (r *FakeRuntimeService) ContainerStatus(containerID string, verbose bool) (*runtimeapi.ContainerStatusResponse, error) { r.Lock() defer r.Unlock() @@ -510,7 +509,7 @@ func (r *FakeRuntimeService) ContainerStatus(_ context.Context, containerID stri } // UpdateContainerResources returns the container resource in the FakeRuntimeService. -func (r *FakeRuntimeService) UpdateContainerResources(context.Context, string, *runtimeapi.ContainerResources) error { +func (r *FakeRuntimeService) UpdateContainerResources(string, *runtimeapi.ContainerResources) error { r.Lock() defer r.Unlock() @@ -519,7 +518,7 @@ func (r *FakeRuntimeService) UpdateContainerResources(context.Context, string, * } // ExecSync emulates the sync execution of a command in a container in the FakeRuntimeService. -func (r *FakeRuntimeService) ExecSync(_ context.Context, containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { +func (r *FakeRuntimeService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) { r.Lock() defer r.Unlock() @@ -529,7 +528,7 @@ func (r *FakeRuntimeService) ExecSync(_ context.Context, containerID string, cmd } // Exec emulates the execution of a command in a container in the FakeRuntimeService. -func (r *FakeRuntimeService) Exec(context.Context, *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { +func (r *FakeRuntimeService) Exec(*runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) { r.Lock() defer r.Unlock() @@ -542,7 +541,7 @@ func (r *FakeRuntimeService) Exec(context.Context, *runtimeapi.ExecRequest) (*ru } // Attach emulates the attach request in the FakeRuntimeService. -func (r *FakeRuntimeService) Attach(_ context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { +func (r *FakeRuntimeService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) { r.Lock() defer r.Unlock() @@ -555,7 +554,7 @@ func (r *FakeRuntimeService) Attach(_ context.Context, req *runtimeapi.AttachReq } // UpdateRuntimeConfig emulates the update of a runtime config for the FakeRuntimeService. -func (r *FakeRuntimeService) UpdateRuntimeConfig(_ context.Context, runtimeCOnfig *runtimeapi.RuntimeConfig) error { +func (r *FakeRuntimeService) UpdateRuntimeConfig(runtimeCOnfig *runtimeapi.RuntimeConfig) error { r.Lock() defer r.Unlock() @@ -575,7 +574,7 @@ func (r *FakeRuntimeService) SetFakeContainerStats(containerStats []*runtimeapi. 
} // ContainerStats returns the container stats in the FakeRuntimeService. -func (r *FakeRuntimeService) ContainerStats(_ context.Context, containerID string) (*runtimeapi.ContainerStats, error) { +func (r *FakeRuntimeService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) { r.Lock() defer r.Unlock() @@ -592,7 +591,7 @@ func (r *FakeRuntimeService) ContainerStats(_ context.Context, containerID strin } // ListContainerStats returns the list of all container stats given the filter in the FakeRuntimeService. -func (r *FakeRuntimeService) ListContainerStats(_ context.Context, filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { +func (r *FakeRuntimeService) ListContainerStats(filter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) { r.Lock() defer r.Unlock() @@ -636,7 +635,7 @@ func (r *FakeRuntimeService) SetFakePodSandboxStats(podStats []*runtimeapi.PodSa } // PodSandboxStats returns the sandbox stats in the FakeRuntimeService. -func (r *FakeRuntimeService) PodSandboxStats(_ context.Context, podSandboxID string) (*runtimeapi.PodSandboxStats, error) { +func (r *FakeRuntimeService) PodSandboxStats(podSandboxID string) (*runtimeapi.PodSandboxStats, error) { r.Lock() defer r.Unlock() @@ -653,7 +652,7 @@ func (r *FakeRuntimeService) PodSandboxStats(_ context.Context, podSandboxID str } // ListPodSandboxStats returns the list of all pod sandbox stats given the filter in the FakeRuntimeService. -func (r *FakeRuntimeService) ListPodSandboxStats(_ context.Context, filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { +func (r *FakeRuntimeService) ListPodSandboxStats(filter *runtimeapi.PodSandboxStatsFilter) ([]*runtimeapi.PodSandboxStats, error) { r.Lock() defer r.Unlock() @@ -683,7 +682,7 @@ func (r *FakeRuntimeService) ListPodSandboxStats(_ context.Context, filter *runt } // ReopenContainerLog emulates call to the reopen container log in the FakeRuntimeService. -func (r *FakeRuntimeService) ReopenContainerLog(_ context.Context, containerID string) error { +func (r *FakeRuntimeService) ReopenContainerLog(containerID string) error { r.Lock() defer r.Unlock() @@ -697,7 +696,7 @@ func (r *FakeRuntimeService) ReopenContainerLog(_ context.Context, containerID s } // CheckpointContainer emulates call to checkpoint a container in the FakeRuntimeService. -func (r *FakeRuntimeService) CheckpointContainer(_ context.Context, options *runtimeapi.CheckpointContainerRequest) error { +func (r *FakeRuntimeService) CheckpointContainer(options *runtimeapi.CheckpointContainerRequest) error { r.Lock() defer r.Unlock() diff --git a/test/e2e_node/container_log_rotation_test.go b/test/e2e_node/container_log_rotation_test.go index 446b4b7c505..951b63ada1f 100644 --- a/test/e2e_node/container_log_rotation_test.go +++ b/test/e2e_node/container_log_rotation_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package e2enode import ( - "context" "time" v1 "k8s.io/api/core/v1" @@ -78,7 +77,7 @@ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID r, _, err := getCRIClient() framework.ExpectNoError(err) - resp, err := r.ContainerStatus(context.Background(), id, false) + resp, err := r.ContainerStatus(id, false) framework.ExpectNoError(err) logPath := resp.GetStatus().GetLogPath() ginkgo.By("wait for container log being rotated to max file limit") diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index a35416f26b1..bfba5a66d20 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -20,7 +20,6 @@ limitations under the License. package e2enode import ( - "context" "fmt" "os/exec" "path" @@ -161,7 +160,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() { ginkgo.By("Dump all running containers") runtime, _, err := getCRIClient() framework.ExpectNoError(err) - containers, err := runtime.ListContainers(context.Background(), &runtimeapi.ContainerFilter{ + containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{ State: &runtimeapi.ContainerStateValue{ State: runtimeapi.ContainerState_CONTAINER_RUNNING, }, diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 9c191ae4e89..12c95352883 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -116,7 +116,7 @@ func waitForContainerRemoval(containerName, podName, podNS string) { rs, _, err := getCRIClient() framework.ExpectNoError(err) gomega.Eventually(func() bool { - containers, err := rs.ListContainers(context.Background(), &runtimeapi.ContainerFilter{ + containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: podName, types.KubernetesPodNamespaceLabel: podNS, diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 291c360a32b..47689c8e0b0 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -155,7 +155,7 @@ func containerGCTest(f *framework.Framework, test testRun) { // Initialize the getContainerNames function to use CRI runtime client. 
pod.getContainerNames = func() ([]string, error) { relevantContainers := []string{} - containers, err := runtime.ListContainers(context.Background(), &runtimeapi.ContainerFilter{ + containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: pod.podName, types.KubernetesPodNamespaceLabel: f.Namespace.Name, diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go index 50a82625adf..a4b31a9dac5 100644 --- a/test/e2e_node/image_list.go +++ b/test/e2e_node/image_list.go @@ -125,11 +125,11 @@ func (rp *remotePuller) Name() string { } func (rp *remotePuller) Pull(image string) ([]byte, error) { - resp, err := rp.imageService.ImageStatus(context.Background(), &runtimeapi.ImageSpec{Image: image}, false) + resp, err := rp.imageService.ImageStatus(&runtimeapi.ImageSpec{Image: image}, false) if err == nil && resp.GetImage() != nil { return nil, nil } - _, err = rp.imageService.PullImage(context.Background(), &runtimeapi.ImageSpec{Image: image}, nil, nil) + _, err = rp.imageService.PullImage(&runtimeapi.ImageSpec{Image: image}, nil, nil) return nil, err } diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go index 4e536a67240..51beb171fd4 100644 --- a/test/e2e_node/topology_manager_test.go +++ b/test/e2e_node/topology_manager_test.go @@ -382,7 +382,7 @@ func waitForAllContainerRemoval(podName, podNS string) { rs, _, err := getCRIClient() framework.ExpectNoError(err) gomega.Eventually(func() bool { - containers, err := rs.ListContainers(context.Background(), &runtimeapi.ContainerFilter{ + containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{ LabelSelector: map[string]string{ types.KubernetesPodNameLabel: podName, types.KubernetesPodNamespaceLabel: podNS,
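
As a rough illustration of the call-site impact, the sketch below assumes the post-change cri-api signatures shown above (RuntimeService methods such as ListContainers no longer take a context.Context). The package name example and the helper runningContainersForPod are hypothetical, not part of this patch; the snippet only shows how the updated e2e helpers read once no ctx is threaded through the CRI client.

// Illustrative sketch only, not part of this patch. It assumes the revised
// cri-api signatures above; the package and helper names are hypothetical.
package example

import (
	"fmt"

	internalapi "k8s.io/cri-api/pkg/apis"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

// runningContainersForPod mirrors the e2e helpers touched in this patch: it
// lists the running containers that belong to the given pod, calling the CRI
// runtime service directly without passing a context.
func runningContainersForPod(rs internalapi.RuntimeService, podName, podNS string) ([]*runtimeapi.Container, error) {
	containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
		// Only running containers, matched by the standard pod labels.
		State: &runtimeapi.ContainerStateValue{
			State: runtimeapi.ContainerState_CONTAINER_RUNNING,
		},
		LabelSelector: map[string]string{
			"io.kubernetes.pod.name":      podName,
			"io.kubernetes.pod.namespace": podNS,
		},
	})
	if err != nil {
		return nil, fmt.Errorf("listing containers for pod %s/%s: %w", podNS, podName, err)
	}
	return containers, nil
}

Callers that previously passed context.Background() simply drop the argument; presumably the remote CRI client continues to manage its own per-call timeouts internally, as it did before this context plumbing was introduced.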