diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 5691c12a269..6af5b221944 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1505,6 +1505,12 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
 // can be admitted, a brief single-word reason and a message explaining why
 // the pod cannot be admitted.
 func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) {
+	if rs := kl.runtimeState.networkErrors(); len(rs) != 0 {
+		if !podUsesHostNetwork(pod) {
+			return false, "NetworkNotReady", fmt.Sprintf("Network is not ready: %v", rs)
+		}
+	}
+
 	// the kubelet will invoke each pod admit handler in sequence
 	// if any handler rejects, the pod is rejected.
 	// TODO: move out of disk check into a pod admitter
@@ -1541,7 +1547,7 @@ func (kl *Kubelet) syncLoop(updates <-chan kubetypes.PodUpdate, handler SyncHand
 	defer housekeepingTicker.Stop()
 	plegCh := kl.pleg.Watch()
 	for {
-		if rs := kl.runtimeState.errors(); len(rs) != 0 {
+		if rs := kl.runtimeState.runtimeErrors(); len(rs) != 0 {
 			glog.Infof("skipping pod synchronization - %v", rs)
 			time.Sleep(5 * time.Second)
 			continue
diff --git a/pkg/kubelet/kubelet_node_status.go b/pkg/kubelet/kubelet_node_status.go
index 0093d63ee6d..827654c2cab 100644
--- a/pkg/kubelet/kubelet_node_status.go
+++ b/pkg/kubelet/kubelet_node_status.go
@@ -552,7 +552,8 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
 	// ref: https://github.com/kubernetes/kubernetes/issues/16961
 	currentTime := unversioned.NewTime(kl.clock.Now())
 	var newNodeReadyCondition api.NodeCondition
-	if rs := kl.runtimeState.errors(); len(rs) == 0 {
+	rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
+	if len(rs) == 0 {
 		newNodeReadyCondition = api.NodeCondition{
 			Type:   api.NodeReady,
 			Status: api.ConditionTrue,
diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go
index 6e595ad9c72..6d7d2664494 100644
--- a/pkg/kubelet/runonce_test.go
+++ b/pkg/kubelet/runonce_test.go
@@ -83,6 +83,7 @@ func TestRunOnce(t *testing.T) {
 		kubeClient:   &fake.Clientset{},
 		hostname:     testKubeletHostname,
 		nodeName:     testKubeletHostname,
+		runtimeState: newRuntimeState(time.Second),
 	}

 	kb.containerManager = cm.NewStubContainerManager()
diff --git a/pkg/kubelet/runtime.go b/pkg/kubelet/runtime.go
index 90a83898a31..6cb74fe364c 100644
--- a/pkg/kubelet/runtime.go
+++ b/pkg/kubelet/runtime.go
@@ -68,16 +68,13 @@ func (s *runtimeState) setInitError(err error) {
 	s.initError = err
 }

-func (s *runtimeState) errors() []string {
+func (s *runtimeState) runtimeErrors() []string {
 	s.RLock()
 	defer s.RUnlock()
 	var ret []string
 	if s.initError != nil {
 		ret = append(ret, s.initError.Error())
 	}
-	if s.networkError != nil {
-		ret = append(ret, s.networkError.Error())
-	}
 	if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
 		ret = append(ret, "container runtime is down")
 	}
@@ -87,6 +84,16 @@
 	return ret
 }

+func (s *runtimeState) networkErrors() []string {
+	s.RLock()
+	defer s.RUnlock()
+	var ret []string
+	if s.networkError != nil {
+		ret = append(ret, s.networkError.Error())
+	}
+	return ret
+}
+
 func newRuntimeState(
 	runtimeSyncThreshold time.Duration,
 ) *runtimeState {
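Not part of the change, but for illustration: a minimal, self-contained sketch of the behavior this diff introduces — network errors are reported separately from runtime errors, and a network error rejects only pods that need pod networking, while host-network pods are still admitted. Everything here is a simplified stand-in for the kubelet's types: `Pod`, the free-function `canAdmitPod`, `main`, and the error messages are assumptions for the sketch, and `podUsesHostNetwork` is approximated by a plain `HostNetwork` field.

// Sketch only: models the runtimeErrors/networkErrors split from this diff.
package main

import (
	"fmt"
	"sync"
	"time"
)

type runtimeState struct {
	sync.RWMutex
	initError                error
	networkError             error
	lastBaseRuntimeSync      time.Time
	baseRuntimeSyncThreshold time.Duration
}

// runtimeErrors reports problems that block all pod syncing
// (kept by syncLoop as the condition for skipping a sync pass).
func (s *runtimeState) runtimeErrors() []string {
	s.RLock()
	defer s.RUnlock()
	var ret []string
	if s.initError != nil {
		ret = append(ret, s.initError.Error())
	}
	if !s.lastBaseRuntimeSync.Add(s.baseRuntimeSyncThreshold).After(time.Now()) {
		ret = append(ret, "container runtime is down")
	}
	return ret
}

// networkErrors reports problems that only matter to pods
// that depend on pod networking.
func (s *runtimeState) networkErrors() []string {
	s.RLock()
	defer s.RUnlock()
	var ret []string
	if s.networkError != nil {
		ret = append(ret, s.networkError.Error())
	}
	return ret
}

// Pod is a stand-in for api.Pod carrying only the field the check needs.
type Pod struct{ HostNetwork bool }

// canAdmitPod mirrors the new admission gate: a network error rejects
// ordinary pods but lets host-network pods through.
func canAdmitPod(s *runtimeState, pod *Pod) (bool, string, string) {
	if rs := s.networkErrors(); len(rs) != 0 {
		if !pod.HostNetwork {
			return false, "NetworkNotReady", fmt.Sprintf("Network is not ready: %v", rs)
		}
	}
	return true, "", ""
}

func main() {
	s := &runtimeState{
		networkError:             fmt.Errorf("cni plugin not initialized"),
		lastBaseRuntimeSync:      time.Now(),
		baseRuntimeSyncThreshold: 30 * time.Second,
	}
	for _, pod := range []*Pod{{HostNetwork: true}, {HostNetwork: false}} {
		ok, reason, msg := canAdmitPod(s, pod)
		fmt.Printf("hostNetwork=%v admitted=%v reason=%q msg=%q\n",
			pod.HostNetwork, ok, reason, msg)
	}
}

Running the sketch admits the host-network pod and rejects the ordinary one with reason "NetworkNotReady", while runtimeErrors stays empty — the same reason setNodeReadyCondition above must now merge both lists to keep NodeReady accurate.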