checkLimitsForResolvConf for the pod create and update events instead of checking periodically
parent c46e8a9248
commit 4f9d2047dd
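In outline, the diff below removes the 30-second wait.Until loop that Kubelet.Run started for CheckLimitsForResolvConf and instead calls the check from HandlePodAdditions and HandlePodUpdates. A minimal sketch of that shift, using an illustrative resolvConfChecker interface in place of the kubelet's dnsConfigurer (the names and structure here are placeholders, not the kubelet's actual types):

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// resolvConfChecker stands in for the kubelet's DNS configurer.
type resolvConfChecker interface {
	CheckLimitsForResolvConf()
}

// Before this commit: check on a fixed 30s period for the kubelet's lifetime.
func startPeriodicCheck(c resolvConfChecker) {
	go wait.Until(c.CheckLimitsForResolvConf, 30*time.Second, wait.NeverStop)
}

// After this commit: check only when a pod add or update event is handled.
func onPodEvent(c resolvConfChecker, resolverConfig string) {
	if c != nil && resolverConfig != "" {
		c.CheckLimitsForResolvConf()
	}
}

func main() {
	// No-op entry point; the two functions above only illustrate the pattern change.
}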
@@ -181,6 +181,7 @@ go_test(
         "//pkg/kubelet/images:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/logs:go_default_library",
+        "//pkg/kubelet/network/dns:go_default_library",
         "//pkg/kubelet/pleg:go_default_library",
         "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/pod/testing:go_default_library",
@@ -1384,11 +1384,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	// handled by pod workers).
 	go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)
 
-	// Start gorouting responsible for checking limits in resolv.conf
-	if kl.dnsConfigurer.ResolverConfig != "" {
-		go wait.Until(func() { kl.dnsConfigurer.CheckLimitsForResolvConf() }, 30*time.Second, wait.NeverStop)
-	}
-
 	// Start component sync loops.
 	kl.statusManager.Start()
 	kl.probeManager.Start()
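For context on what was removed: wait.Until (from k8s.io/apimachinery/pkg/util/wait) invokes the given function immediately and then once per period until the stop channel is closed, so with wait.NeverStop the resolv.conf check ran every 30 seconds for as long as the kubelet ran. A small self-contained sketch of that behavior (the one-second period and printed message are illustrative):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stop := make(chan struct{})

	// Runs the closure immediately, then once per second, until stop is closed.
	go wait.Until(func() { fmt.Println("checking resolv.conf limits") }, 1*time.Second, stop)

	time.Sleep(3 * time.Second)
	close(stop)
	// Give the goroutine a moment to observe the closed channel before exiting.
	time.Sleep(100 * time.Millisecond)
}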
@@ -1978,6 +1973,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
 	start := kl.clock.Now()
 	sort.Sort(sliceutils.PodsByCreationTime(pods))
 	for _, pod := range pods {
+		// Responsible for checking limits in resolv.conf
+		if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
+			kl.dnsConfigurer.CheckLimitsForResolvConf()
+		}
 		existingPods := kl.podManager.GetPods()
 		// Always add the pod to the pod manager. Kubelet relies on the pod
 		// manager as the source of truth for the desired state. If a pod does
@@ -2015,6 +2014,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
 func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
 	start := kl.clock.Now()
 	for _, pod := range pods {
+		// Responsible for checking limits in resolv.conf
+		if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
+			kl.dnsConfigurer.CheckLimitsForResolvConf()
+		}
 		kl.podManager.UpdatePod(pod)
 		if kubepod.IsMirrorPod(pod) {
 			kl.handleMirrorPod(pod, start)
@@ -51,6 +51,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/logs"
+	"k8s.io/kubernetes/pkg/kubelet/network/dns"
 	"k8s.io/kubernetes/pkg/kubelet/pleg"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
@@ -481,6 +482,16 @@ func TestHandlePortConflicts(t *testing.T) {
 		},
 	}}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
@@ -518,6 +529,16 @@ func TestHandleHostNameConflicts(t *testing.T) {
 		},
 	}}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// default NodeName in test is 127.0.0.1
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
@@ -551,6 +572,17 @@ func TestHandleNodeSelector(t *testing.T) {
 		},
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
+
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
 		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
@@ -582,6 +614,16 @@ func TestHandleMemExceeded(t *testing.T) {
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
@@ -668,6 +710,16 @@ func TestHandlePluginResources(t *testing.T) {
 	kl.admitHandlers = lifecycle.PodAdmitHandlers{}
 	kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
 	// adjusts node.allocatableResource for this resource to a sufficient value.
 	fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),
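The test changes above only wire a dnsConfigurer into the existing handler tests. For completeness, CheckLimitsForResolvConf could also be exercised directly against a file; a hedged sketch of such a test, assuming the dns.NewConfigurer signature shown above and import paths matching this vintage of the tree (the temp-file contents, the event expectation, and the test name are illustrative, not part of this commit):

package kubelet

import (
	"io/ioutil"
	"os"
	"testing"

	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/kubelet/network/dns"
)

func TestCheckLimitsForResolvConfEmitsWarning(t *testing.T) {
	// Write a resolv.conf whose search line exceeds the kubelet's limits.
	f, err := ioutil.TempFile("", "resolv.conf")
	if err != nil {
		t.Fatal(err)
	}
	defer os.Remove(f.Name())
	search := "search a.com b.com c.com d.com e.com f.com g.com h.com i.com j.com\n"
	if _, err := f.WriteString(search); err != nil {
		t.Fatal(err)
	}
	f.Close()

	recorder := record.NewFakeRecorder(20)
	nodeRef := &v1.ObjectReference{Kind: "Node", Name: "testNode", UID: types.UID("testNode")}
	configurer := dns.NewConfigurer(recorder, nodeRef, nil, nil, "TEST", f.Name())
	configurer.CheckLimitsForResolvConf()

	select {
	case event := <-recorder.Events:
		t.Logf("got expected warning: %s", event)
	default:
		t.Error("expected a CheckLimitsForResolvConf warning event")
	}
}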
@@ -156,7 +156,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
 	f, err := os.Open(c.ResolverConfig)
 	if err != nil {
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
-		glog.Error("CheckLimitsForResolvConf: " + err.Error())
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error())
 		return
 	}
 	defer f.Close()
@@ -164,7 +164,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
 	_, hostSearch, _, err := parseResolvConf(f)
 	if err != nil {
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
-		glog.Error("CheckLimitsForResolvConf: " + err.Error())
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error())
 		return
 	}
 
@@ -177,14 +177,14 @@ func (c *Configurer) CheckLimitsForResolvConf() {
 	if len(hostSearch) > domainCountLimit {
 		log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit)
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
-		glog.Error("CheckLimitsForResolvConf: " + log)
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + log)
 		return
 	}
 
 	if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars {
 		log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars)
 		c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
-		glog.Error("CheckLimitsForResolvConf: " + log)
+		glog.V(4).Infof("CheckLimitsForResolvConf: " + log)
 		return
 	}
 
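The lines changed here only lower the log level; the surrounding checks enforce two limits on the parsed search line: a maximum number of search domains (domainCountLimit) and a maximum total length (validation.MaxDNSSearchListChars). A standalone sketch of that validation logic, with the limits passed in as parameters instead of taken from the kubelet's constants (the concrete values in main are illustrative):

package main

import (
	"fmt"
	"strings"
)

// checkSearchLimits mirrors the two resolv.conf checks above: too many search
// domains, or a search line longer than the allowed number of characters.
func checkSearchLimits(hostSearch []string, domainCountLimit, maxSearchListChars int) error {
	if len(hostSearch) > domainCountLimit {
		return fmt.Errorf("search line has %d domains, more than the limit of %d", len(hostSearch), domainCountLimit)
	}
	if joined := strings.Join(hostSearch, " "); len(joined) > maxSearchListChars {
		return fmt.Errorf("search line is %d chars, more than the limit of %d", len(joined), maxSearchListChars)
	}
	return nil
}

func main() {
	search := []string{"ns.svc.cluster.local", "svc.cluster.local", "cluster.local", "a.com", "b.com", "c.com", "d.com"}
	// Illustrative limits only; the kubelet derives its own from validation constants.
	if err := checkSearchLimits(search, 6, 256); err != nil {
		fmt.Println("warning:", err)
	}
}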