handling locks and update tests. Fail node if network is not initialized

parent cf56f7a8ef
commit 5f4570b764
@@ -2425,15 +2425,18 @@ func (kl *Kubelet) syncNetworkStatus() {
             err = fmt.Errorf("Error on adding ip table rules: %v", err)
             glog.Error(err)
         }
-        if len(kl.podCIDR) == 0 {
+        kl.networkConfigMutex.Lock()
+        podCIDR := kl.podCIDR
+        kl.networkConfigMutex.Unlock()
+        if len(podCIDR) == 0 {
             err = fmt.Errorf("ConfigureCBR0 requested, but PodCIDR not set. Will not configure CBR0 right now")
             glog.Warning(err)
-        } else if err := kl.reconcileCBR0(kl.podCIDR); err != nil {
+        } else if err := kl.reconcileCBR0(podCIDR); err != nil {
             err = fmt.Errorf("Error configuring cbr0: %v", err)
             glog.Error(err)
         }
     }
-    kl.runtimeState.setNetworkError(err)
+    kl.runtimeState.setNetworkState(err)
 }
 
 // setNodeStatus fills in the Status fields of the given Node, overwriting
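The hunk above stops reading `kl.podCIDR` directly and instead takes a copy under `networkConfigMutex`, then works with the local `podCIDR` after releasing the lock. Below is a minimal, standalone sketch of that copy-under-lock pattern; the `netConfig` type and its fields are illustrative stand-ins, not the kubelet's actual types.

```go
package main

import (
	"fmt"
	"sync"
)

// netConfig is an illustrative stand-in for the kubelet fields that
// networkConfigMutex guards in the real code.
type netConfig struct {
	mu      sync.Mutex
	podCIDR string
}

// snapshotPodCIDR copies the shared field while holding the lock, so callers
// can use a stable value without holding the mutex through slower work such
// as reconfiguring the bridge.
func (c *netConfig) snapshotPodCIDR() string {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.podCIDR
}

func main() {
	cfg := &netConfig{podCIDR: "10.123.0.0/24"}

	podCIDR := cfg.snapshotPodCIDR() // lock held only for the copy
	if len(podCIDR) == 0 {
		fmt.Println("PodCIDR not set; skipping bridge configuration")
		return
	}
	fmt.Println("configuring bridge for", podCIDR)
}
```

The likely motivation is that `reconcileCBR0` can take a while; copying first means the mutex is not held across the slow reconfiguration, so whatever updates `podCIDR` is not blocked in the meantime.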
|
@@ -140,7 +140,7 @@ func newTestKubelet(t *testing.T) *TestKubelet {
 
     kubelet.volumeManager = newVolumeManager()
     kubelet.containerManager, _ = newContainerManager(fakeContainerMgrMountInt(), mockCadvisor, "", "", "")
-    kubelet.runtimeState.setNetworkError(nil)
+    kubelet.runtimeState.setNetworkState(nil)
     fakeClock := &util.FakeClock{Time: time.Now()}
     kubelet.backOff = util.NewBackOff(time.Second, time.Minute)
     kubelet.backOff.Clock = fakeClock
@@ -2996,8 +2996,9 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
             },
         },
     }
 
     kubelet.runtimeState = newRuntimeState(time.Duration(0))
+    kubelet.runtimeState.setNetworkState(nil)
     kubelet.updateRuntimeUp()
     if err := kubelet.updateNodeStatus(); err != nil {
         t.Errorf("unexpected error: %v", err)
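Because `networkError` now starts out non-nil (see the `newRuntimeState` hunk further down), a test that expects `updateNodeStatus` to report a healthy node has to clear the network state first, which is what the added `setNetworkState(nil)` call does. A hedged sketch of that arrange-then-assert shape, written against a simplified stand-in type rather than the kubelet's real `runtimeState`:

```go
package example

import (
	"errors"
	"testing"
)

// nodeNetState is a stand-in used only for this sketch; the kubelet's real
// runtimeState carries more than the network error.
type nodeNetState struct{ networkError error }

func newNodeNetState() *nodeNetState {
	// Pessimistic default, mirroring the commit: unknown until proven otherwise.
	return &nodeNetState{networkError: errors.New("network state unknown")}
}

func (s *nodeNetState) setNetworkState(err error) { s.networkError = err }
func (s *nodeNetState) networkReady() bool        { return s.networkError == nil }

func TestReadyOnlyAfterNetworkStateCleared(t *testing.T) {
	state := newNodeNetState()
	if state.networkReady() {
		t.Fatal("expected not ready while network state is unknown")
	}
	state.setNetworkState(nil) // mirrors the line added to the kubelet test
	if !state.networkReady() {
		t.Fatal("expected ready after the network error is cleared")
	}
}
```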
|
@@ -17,6 +17,7 @@ limitations under the License.
 package kubelet
 
 import (
+    "fmt"
     "sync"
     "time"
 )
@@ -35,10 +36,7 @@ func (s *runtimeState) setRuntimeSync(t time.Time) {
     s.lastBaseRuntimeSync = t
 }
 
-func (s *runtimeState) setNetworkError(err error) {
-    if err == nil {
-        return
-    }
+func (s *runtimeState) setNetworkState(err error) {
     s.Lock()
     defer s.Unlock()
     s.networkError = err
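The old `setNetworkError` returned early when handed nil, so once an error was recorded it could never be cleared; the renamed `setNetworkState` stores whatever it is given, which lets a successful `syncNetworkStatus` reset the state. A small sketch contrasting the two behaviors on an illustrative type (not the real `runtimeState`):

```go
package example

import "sync"

// netState is an illustrative stand-in for the kubelet's runtimeState.
type netState struct {
	sync.Mutex
	networkError error
}

// setNetworkErrorOld mirrors the removed behavior: nil was ignored, so a
// recorded error stuck around forever.
func (s *netState) setNetworkErrorOld(err error) {
	if err == nil {
		return
	}
	s.Lock()
	defer s.Unlock()
	s.networkError = err
}

// setNetworkState mirrors the new behavior: nil is stored too, which is how a
// successful network sync clears a previously recorded error.
func (s *netState) setNetworkState(err error) {
	s.Lock()
	defer s.Unlock()
	s.networkError = err
}
```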
@@ -70,5 +68,6 @@ func newRuntimeState(runtimeSyncThreshold time.Duration) *runtimeState {
     return &runtimeState{
         lastBaseRuntimeSync:      time.Time{},
         baseRuntimeSyncThreshold: runtimeSyncThreshold,
+        networkError:             fmt.Errorf("network state unknown"),
     }
 }
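Seeding `networkError` with a non-nil "network state unknown" error is what makes the node report a network failure until the network is actually initialized: each pass of `syncNetworkStatus` overwrites the field, so the error only disappears once the iptables rules and cbr0 are configured successfully. A compact lifecycle sketch under the same illustrative assumptions as the previous examples:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// netState is an illustrative stand-in for the kubelet's runtimeState.
type netState struct {
	sync.Mutex
	networkError error
}

func newNetState() *netState {
	// Assume the network is broken until the first successful sync says otherwise.
	return &netState{networkError: errors.New("network state unknown")}
}

func (s *netState) setNetworkState(err error) {
	s.Lock()
	defer s.Unlock()
	s.networkError = err
}

func (s *netState) currentNetworkError() error {
	s.Lock()
	defer s.Unlock()
	return s.networkError
}

// syncNetworkStatus stands in for the kubelet's periodic sync step: whatever
// the configuration attempt returns, success or failure, is recorded.
func syncNetworkStatus(s *netState, configure func() error) {
	s.setNetworkState(configure())
}

func main() {
	state := newNetState()
	fmt.Println("before any sync:    ", state.currentNetworkError())

	syncNetworkStatus(state, func() error { return errors.New("PodCIDR not set") })
	fmt.Println("after failed sync:  ", state.currentNetworkError())

	syncNetworkStatus(state, func() error { return nil })
	fmt.Println("after success sync: ", state.currentNetworkError())
}
```

Anything that consumes this state (node status reporting, for example) can then treat a non-nil network error as "node not ready", which is the behavior the commit title describes.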
|