
A number of race conditions exist when pods are terminated early in their lifecycle, because components in the kubelet need to know "no running containers" or "containers can't be started from now on" but were relying on outdated state.

Only the pod worker knows whether containers are being started for a given pod, which is required to know when a pod is "terminated" (no running containers, none coming). Move that responsibility and the podKiller function into the pod workers, and route everything that was killing the pod through the UpdatePod loop. Split syncPod into three phases - setup, terminate containers, and cleanup pod - and make the transitions between those methods visible to other components. After this change, to kill a pod you tell the pod worker to UpdatePod({UpdateType: SyncPodKill, Pod: pod}).

Several places in the kubelet were incorrect about whether they were handling terminating (should stop running, might still have containers) or terminated (no running containers) pods. The pod worker exposes methods that allow other loops to know when to set up or tear down resources based on the state of the pod - these methods remove the possibility of race conditions by ensuring a single component is responsible for knowing each pod's allowed state, while other components simply check whether they are within the window by UID.

Container removal no longer blocks final pod deletion in the API server; it is handled as background cleanup. Node shutdown no longer marks pods as failed, since they can be restarted in the next step.

See https://docs.google.com/document/d/1Pic5TPntdJnYfIpBeZndDelM-AbS4FN9H2GTLFhoJ04/edit# for details
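The shape of the new flow is easiest to see in miniature. The sketch below models the pattern described above - a single pod worker owning lifecycle state, kills routed through UpdatePod, and other loops querying by UID. The type and method names here (podWorkers, UpdatePodOptions, CouldHaveRunningContainers) are illustrative stand-ins approximating the kubelet types, not the exact API:

// Illustrative sketch only: names approximate the kubelet's pod worker
// types described in the commit message above, not the exact API.
package main

import (
	"fmt"
	"sync"
)

type UID string

type UpdateType int

const (
	SyncPodCreate UpdateType = iota
	SyncPodKill
)

type Pod struct {
	UID  UID
	Name string
}

type UpdatePodOptions struct {
	UpdateType UpdateType
	Pod        *Pod
}

// podWorkers is the single component that knows each pod's allowed state.
// Components that previously killed pods directly now submit an update and
// query the worker by UID before setting up or tearing down resources.
type podWorkers struct {
	mu         sync.Mutex
	terminated map[UID]bool
}

// UpdatePod is the one entry point for starting or killing a pod; a kill is
// expressed as UpdatePod({UpdateType: SyncPodKill, Pod: pod}).
func (w *podWorkers) UpdatePod(opts UpdatePodOptions) {
	w.mu.Lock()
	defer w.mu.Unlock()
	// In the real kubelet this dispatches to a per-pod goroutine that walks
	// the setup -> terminating -> terminated phases; here we only record
	// whether the pod has been asked to terminate.
	w.terminated[opts.Pod.UID] = opts.UpdateType == SyncPodKill
}

// CouldHaveRunningContainers is the kind of query other loops (volume
// manager, eviction, status) use to decide whether they are still inside
// the pod's lifecycle window.
func (w *podWorkers) CouldHaveRunningContainers(uid UID) bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	return !w.terminated[uid]
}

func main() {
	w := &podWorkers{terminated: map[UID]bool{}}
	pod := &Pod{UID: "12345678", Name: "foo"}
	w.UpdatePod(UpdatePodOptions{UpdateType: SyncPodCreate, Pod: pod})
	w.UpdatePod(UpdatePodOptions{UpdateType: SyncPodKill, Pod: pod})
	fmt.Println("could have running containers:", w.CouldHaveRunningContainers(pod.UID))
}

The test file below predates none of this machinery but touches it through the fakePodWorkers stub it wires into the Kubelet.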
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"os"
	"testing"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
	"k8s.io/mount-utils"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	utiltesting "k8s.io/client-go/util/testing"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/configmap"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	"k8s.io/kubernetes/pkg/kubelet/eviction"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
	"k8s.io/kubernetes/pkg/kubelet/secret"
	"k8s.io/kubernetes/pkg/kubelet/server/stats"
	"k8s.io/kubernetes/pkg/kubelet/status"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util/hostutil"
)

func TestRunOnce(t *testing.T) {
	cadvisor := &cadvisortest.Mock{}
	cadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
	cadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:     400,
		Capacity:  1000,
		Available: 600,
	}, nil)
	cadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
		Usage:    9,
		Capacity: 10,
	}, nil)
	fakeSecretManager := secret.NewFakeManager()
	fakeConfigMapManager := configmap.NewFakeManager()
	podManager := kubepod.NewBasicPodManager(
		podtest.NewFakeMirrorClient(), fakeSecretManager, fakeConfigMapManager)
	fakeRuntime := &containertest.FakeRuntime{}
	basePath, err := utiltesting.MkTmpdir("kubelet")
	if err != nil {
		t.Fatalf("can't make a temp rootdir %v", err)
	}
	defer os.RemoveAll(basePath)
	kb := &Kubelet{
		rootDirectory:    basePath,
		recorder:         &record.FakeRecorder{},
		cadvisor:         cadvisor,
		nodeLister:       testNodeLister{},
		statusManager:    status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}),
		podManager:       podManager,
		podWorkers:       &fakePodWorkers{},
		os:               &containertest.FakeOS{},
		containerRuntime: fakeRuntime,
		reasonCache:      NewReasonCache(),
		clock:            clock.RealClock{},
		kubeClient:       &fake.Clientset{},
		hostname:         testKubeletHostname,
		nodeName:         testKubeletHostname,
		runtimeState:     newRuntimeState(time.Second),
		hostutil:         hostutil.NewFakeHostUtil(nil),
	}
	kb.containerManager = cm.NewStubContainerManager()

	plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
	kb.volumePluginMgr, err =
		NewInitializedVolumePluginMgr(kb, fakeSecretManager, fakeConfigMapManager, nil, []volume.VolumePlugin{plug}, nil /* prober */)
	if err != nil {
		t.Fatalf("failed to initialize VolumePluginMgr: %v", err)
	}
	kb.volumeManager = volumemanager.NewVolumeManager(
		true,
		kb.nodeName,
		kb.podManager,
		kb.podWorkers,
		kb.kubeClient,
		kb.volumePluginMgr,
		fakeRuntime,
		kb.mounter,
		kb.hostutil,
		kb.getPodsDir(),
		kb.recorder,
		false, /* experimentalCheckNodeCapabilitiesBeforeMount */
		false, /* keepTerminatedPodVolumes */
		volumetest.NewBlockVolumePathHandler())

	// TODO: Factor out "stats.Provider" from Kubelet so we don't have a cyclic dependency
	volumeStatsAggPeriod := time.Second * 10
	kb.resourceAnalyzer = stats.NewResourceAnalyzer(kb, volumeStatsAggPeriod, kb.recorder)
	nodeRef := &v1.ObjectReference{
		Kind:      "Node",
		Name:      string(kb.nodeName),
		UID:       types.UID(kb.nodeName),
		Namespace: "",
	}
	fakeKillPodFunc := func(pod *v1.Pod, evict bool, gracePeriodOverride *int64, fn func(*v1.PodStatus)) error {
		return nil
	}
	fakeMirrorPodFunc := func(*v1.Pod) (*v1.Pod, bool) { return nil, false }
	evictionManager, evictionAdmitHandler := eviction.NewManager(kb.resourceAnalyzer, eviction.Config{}, fakeKillPodFunc, fakeMirrorPodFunc, nil, nil, kb.recorder, nodeRef, kb.clock)

	kb.evictionManager = evictionManager
	kb.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)
	kb.mounter = mount.NewFakeMounter(nil)
	if err := kb.setupDataDirs(); err != nil {
		t.Errorf("Failed to init data dirs: %v", err)
	}

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "foo",
				Namespace: "new",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "bar"},
				},
			},
		},
	}
	podManager.SetPods(pods)
	// The original version of this test was meaningless: the fake runtime always returned an empty podStatus, and
	// the old isPodRunning logic happened to return true for an empty status, so the test always passed. The
	// isPodRunning logic has since changed, so to keep the test passing we set the pod status directly in the fake
	// runtime. The test remains largely meaningless, because isPodRunning will always return true once this status
	// is set; however, since runonce is no longer used in Kubernetes, cleaning this up is deprioritized.
	// TODO(random-liu) Fix the test, make it meaningful.
	fakeRuntime.PodStatus = kubecontainer.PodStatus{
		ContainerStatuses: []*kubecontainer.Status{
			{
				Name:  "bar",
				State: kubecontainer.ContainerStateRunning,
			},
		},
	}
	results, err := kb.runOnce(pods, time.Millisecond)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if results[0].Err != nil {
		t.Errorf("unexpected run pod error: %v", results[0].Err)
	}
	if results[0].Pod.Name != "foo" {
		t.Errorf("unexpected pod: %q", results[0].Pod.Name)
	}
}