kubernetes/pkg/kubelet/prober/common_test.go
Tim Hockin 11a25bfeb6
De-share the Handler struct in core API (#105979)
* De-share the Handler struct in core API

An upcoming PR adds a handler that applies to only one of these paths.
Having fields that don't work seems bad.

This never should have been shared.  Lifecycle hooks are like a "write",
while probes are more like a "read". HTTPGet and TCPSocket don't really
make sense as lifecycle hooks (but I can't take that back). When we add
gRPC, it is EXPLICITLY a health check (defined by gRPC), not an arbitrary
RPC - so a probe makes sense but a hook does not.

In the future I can also see adding lifecycle hooks that don't make
sense as probes.  E.g. 'sleep' is a common lifecycle request, but today
the only option is `exec`, which requires having a sleep binary in your
image.

* Run update scripts
2021-10-29 13:15:11 -07:00
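
For context, the resulting API is roughly the following sketch (abridged
from k8s.io/api/core/v1; JSON tags, doc comments, and later additions are
omitted). The point of the split is that each side can grow actions the
other never sees:

type ProbeHandler struct {
	Exec      *ExecAction
	HTTPGet   *HTTPGetAction
	TCPSocket *TCPSocketAction
	// A gRPC health-check action can be added here without ever
	// appearing on lifecycle hooks.
}

type LifecycleHandler struct {
	Exec      *ExecAction
	HTTPGet   *HTTPGetAction
	TCPSocket *TCPSocketAction
	// Hook-only actions (e.g. a built-in sleep) can be added here
	// without ever appearing on probes.
}

type Probe struct {
	ProbeHandler
	// InitialDelaySeconds, TimeoutSeconds, PeriodSeconds,
	// SuccessThreshold, FailureThreshold elided.
}

type Lifecycle struct {
	PostStart *LifecycleHandler
	PreStop   *LifecycleHandler
}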

/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package prober

import (
	"reflect"
	"sync"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	"k8s.io/kubernetes/pkg/kubelet/prober/results"
	"k8s.io/kubernetes/pkg/kubelet/status"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	"k8s.io/kubernetes/pkg/probe"
	"k8s.io/utils/exec"
)

const (
	testContainerName = "cOnTaInEr_NaMe"
	testPodUID        = "pOd_UiD"
)

var testContainerID = kubecontainer.ContainerID{Type: "test", ID: "cOnTaInEr_Id"}

func getTestRunningStatus() v1.PodStatus {
	return getTestRunningStatusWithStarted(true)
}

func getTestRunningStatusWithStarted(started bool) v1.PodStatus {
	containerStatus := v1.ContainerStatus{
		Name:        testContainerName,
		ContainerID: testContainerID.String(),
	}
	containerStatus.State.Running = &v1.ContainerStateRunning{StartedAt: metav1.Now()}
	containerStatus.Started = &started
	podStatus := v1.PodStatus{
		Phase:             v1.PodRunning,
		ContainerStatuses: []v1.ContainerStatus{containerStatus},
	}
	return podStatus
}

func getTestPod() *v1.Pod {
	container := v1.Container{
		Name: testContainerName,
	}
	pod := v1.Pod{
		Spec: v1.PodSpec{
			Containers:    []v1.Container{container},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	pod.Name = "testPod"
	pod.UID = testPodUID
	return &pod
}
func setTestProbe(pod *v1.Pod, probeType probeType, probeSpec v1.Probe) {
	// All tests rely on the fake exec prober.
	probeSpec.ProbeHandler = v1.ProbeHandler{
		Exec: &v1.ExecAction{},
	}
	// Apply test defaults, overridden for test speed.
	defaults := map[string]int64{
		"TimeoutSeconds":   1,
		"PeriodSeconds":    1,
		"SuccessThreshold": 1,
		"FailureThreshold": 1,
	}
	for field, value := range defaults {
		f := reflect.ValueOf(&probeSpec).Elem().FieldByName(field)
		if f.Int() == 0 {
			f.SetInt(value)
		}
	}
	switch probeType {
	case readiness:
		pod.Spec.Containers[0].ReadinessProbe = &probeSpec
	case liveness:
		pod.Spec.Containers[0].LivenessProbe = &probeSpec
	case startup:
		pod.Spec.Containers[0].StartupProbe = &probeSpec
	}
}
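
// newTestManager builds a prober manager backed by a fake clientset, three
// fresh results managers (liveness, readiness, startup), and a fake exec
// prober that always succeeds, with the test pod pre-registered.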
func newTestManager() *manager {
	podManager := kubepod.NewBasicPodManager(nil, nil, nil)
	// Add test pod to pod manager, so that status manager can get the pod from pod manager if needed.
	podManager.AddPod(getTestPod())
	m := NewManager(
		status.NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}),
		results.NewManager(),
		results.NewManager(),
		results.NewManager(),
		nil, // runner
		&record.FakeRecorder{},
	).(*manager)
	// Don't actually execute probes.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	return m
}
func newTestWorker(m *manager, probeType probeType, probeSpec v1.Probe) *worker {
	pod := getTestPod()
	setTestProbe(pod, probeType, probeSpec)
	return newWorker(m, probeType, pod, pod.Spec.Containers[0])
}
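
// fakeExecProber ignores the command and returns a canned result and error.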
type fakeExecProber struct {
	result probe.Result
	err    error
}

func (p fakeExecProber) Probe(c exec.Cmd) (probe.Result, string, error) {
	return p.result, "", p.err
}
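
// syncExecProber guards fakeExecProber with an RWMutex so tests can change
// the canned result while probe workers are running concurrently.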
type syncExecProber struct {
	sync.RWMutex
	fakeExecProber
}

func (p *syncExecProber) set(result probe.Result, err error) {
	p.Lock()
	defer p.Unlock()
	p.result = result
	p.err = err
}

func (p *syncExecProber) Probe(cmd exec.Cmd) (probe.Result, string, error) {
	p.RLock()
	defer p.RUnlock()
	return p.fakeExecProber.Probe(cmd)
}