Put cbr0-modifying stuff behind a flag. Address some other comments.

CJ Cullen 2015-05-11 14:07:24 -07:00
parent 5e3d2b9138
commit 31ea7d1295
4 changed files with 44 additions and 14 deletions

View File

@@ -40,4 +40,9 @@
 {% set docker_root = " --docker_root=" + grains.docker_root -%}
 {% endif -%}
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}}"
+{% set configure_cbr0 = "" -%}
+{% if pillar['allocate_node_cidrs'] is defined -%}
+{% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
+{% endif -%}
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}}"

View File

@@ -104,6 +104,7 @@ type KubeletServer struct {
     CgroupRoot            string
     ContainerRuntime      string
     DockerDaemonContainer string
+    ConfigureCBR0         bool

     // Flags intended for testing
@@ -162,6 +163,7 @@ func NewKubeletServer() *KubeletServer {
         CgroupRoot:            "/",
         ContainerRuntime:      "docker",
         DockerDaemonContainer: "/docker-daemon",
+        ConfigureCBR0:         false,
     }
 }
@@ -218,6 +220,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
     fs.StringVar(&s.CgroupRoot, "cgroup_root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '/', which means top-level.")
     fs.StringVar(&s.ContainerRuntime, "container_runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
     fs.StringVar(&s.DockerDaemonContainer, "docker-daemon-container", s.DockerDaemonContainer, "Optional resource-only container in which to place the Docker Daemon. Empty for no container (Default: /docker-daemon).")
+    fs.BoolVar(&s.ConfigureCBR0, "configure-cbr0", s.ConfigureCBR0, "If true, kubelet will configure cbr0 based on Node.Spec.PodCIDR.")

     // Flags intended for testing, not recommended used in production environments.
     fs.BoolVar(&s.ReallyCrashForTesting, "really-crash-for-testing", s.ReallyCrashForTesting, "If true, when panics occur crash. Intended for testing.")
@@ -333,6 +336,7 @@ func (s *KubeletServer) Run(_ []string) error {
         ContainerRuntime:      s.ContainerRuntime,
         Mounter:               mounter,
         DockerDaemonContainer: s.DockerDaemonContainer,
+        ConfigureCBR0:         s.ConfigureCBR0,
     }

     RunKubelet(&kcfg, nil)
@@ -582,6 +586,7 @@ type KubeletConfig struct {
     ContainerRuntime      string
     Mounter               mount.Interface
     DockerDaemonContainer string
+    ConfigureCBR0         bool
 }

 func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
@@ -631,7 +636,8 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.PodConfig, err error) {
         kc.CgroupRoot,
         kc.ContainerRuntime,
         kc.Mounter,
-        kc.DockerDaemonContainer)
+        kc.DockerDaemonContainer,
+        kc.ConfigureCBR0)
     if err != nil {
         return nil, nil, err

View File

@@ -29,7 +29,7 @@ var cidrRegexp = regexp.MustCompile(`inet ([0-9a-fA-F.:]*/[0-9]*)`)

 func ensureCbr0(wantCIDR *net.IPNet) error {
     if !cbr0CidrCorrect(wantCIDR) {
-        glog.V(5).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)
+        glog.V(2).Infof("Attempting to recreate cbr0 with address range: %s", wantCIDR)

         // delete cbr0
         if err := exec.Command("ip", "link", "set", "dev", "cbr0", "down").Run(); err != nil {
@@ -56,9 +56,10 @@ func ensureCbr0(wantCIDR *net.IPNet) error {
         // restart docker
         if err := exec.Command("service", "docker", "restart").Run(); err != nil {
             glog.Error(err)
-            return err
+            // For now just log the error. The containerRuntime check will catch docker failures.
+            // TODO (dawnchen) figure out what we should do for rkt here.
         }
-        glog.V(5).Info("Recreated cbr0 and restarted docker")
+        glog.V(2).Info("Recreated cbr0 and restarted docker")
     }
     return nil
 }
@@ -73,11 +74,11 @@ func cbr0CidrCorrect(wantCIDR *net.IPNet) bool {
         return false
     }
     cbr0IP, cbr0CIDR, err := net.ParseCIDR(string(match[1]))
-    cbr0CIDR.IP = cbr0IP
     if err != nil {
         glog.Errorf("Couldn't parse CIDR: %q", match[1])
         return false
     }
+    cbr0CIDR.IP = cbr0IP
     glog.V(5).Infof("Want cbr0 CIDR: %s, have cbr0 CIDR: %s", wantCIDR, cbr0CIDR)
     return wantCIDR.IP.Equal(cbr0IP) && bytes.Equal(wantCIDR.Mask, cbr0CIDR.Mask)
 }
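
Moving cbr0CIDR.IP = cbr0IP below the error check matters because net.ParseCIDR returns a nil *net.IPNet alongside a non-nil error, so the old ordering could dereference nil on malformed `ip addr show` output. A standalone sketch of the stdlib behavior involved (the CIDR values here are hypothetical):

package main

import (
    "fmt"
    "net"
)

func main() {
    // Success: ParseCIDR returns the interface IP and the masked network.
    ip, ipnet, err := net.ParseCIDR("10.244.1.5/24")
    fmt.Println(ip, ipnet, err) // 10.244.1.5 10.244.1.0/24 <nil>

    // cbr0CidrCorrect copies the interface IP back onto the network so the
    // comparison sees the address actually assigned to the bridge, not the
    // masked network address.
    ipnet.IP = ip
    fmt.Println(ipnet) // 10.244.1.5/24

    // Failure: the returned *net.IPNet is nil, so assigning to ipnet.IP
    // before checking err (the old code's order) would panic.
    _, bad, err := net.ParseCIDR("not-a-cidr")
    fmt.Println(bad == nil, err != nil) // true true
}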

View File

@@ -138,7 +138,8 @@ func NewMainKubelet(
     cgroupRoot string,
     containerRuntime string,
     mounter mount.Interface,
-    dockerDaemonContainer string) (*Kubelet, error) {
+    dockerDaemonContainer string,
+    configureCBR0 bool) (*Kubelet, error) {
     if rootDirectory == "" {
         return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
     }
@@ -244,6 +245,7 @@ func NewMainKubelet(
         oomWatcher:    oomWatcher,
         cgroupRoot:    cgroupRoot,
         mounter:       mounter,
+        configureCBR0: configureCBR0,
     }

     if plug, err := network.InitNetworkPlugin(networkPlugins, networkPluginName, &networkHost{klet}); err != nil {
@@ -456,6 +458,10 @@ type Kubelet struct {
     // Manager of non-Runtime containers.
     containerManager containerManager
+
+    // Whether or not kubelet should take responsibility for keeping cbr0 in
+    // the correct state.
+    configureCBR0 bool
 }

 // getRootDir returns the full path to the directory under which kubelet can
@@ -1555,7 +1561,7 @@ func (kl *Kubelet) updateRuntimeUp() {
     }
 }

-func (kl *Kubelet) reconcileCbr0(podCIDR string) error {
+func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
     if podCIDR == "" {
         glog.V(5).Info("PodCIDR not set. Will not configure cbr0.")
         return nil
@@ -1606,7 +1612,8 @@ func (kl *Kubelet) recordNodeUnschedulableEvent() {
 // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
 var oldNodeUnschedulable bool

-// tryUpdateNodeStatus tries to update node status to master.
+// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
+// is set, this function will also confirm that cbr0 is configured correctly.
 func (kl *Kubelet) tryUpdateNodeStatus() error {
     node, err := kl.kubeClient.Nodes().Get(kl.hostname)
     if err != nil {
@@ -1616,8 +1623,12 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
         return fmt.Errorf("no node instance returned for %q", kl.hostname)
     }

-    if err := kl.reconcileCbr0(node.Spec.PodCIDR); err != nil {
-        glog.Errorf("Error configuring cbr0: %v", err)
+    networkConfigured := true
+    if kl.configureCBR0 {
+        if err := kl.reconcileCBR0(node.Spec.PodCIDR); err != nil {
+            networkConfigured = false
+            glog.Errorf("Error configuring cbr0: %v", err)
+        }
     }

     // TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
@@ -1661,18 +1672,25 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
     currentTime := util.Now()
     var newCondition api.NodeCondition
-    if containerRuntimeUp {
+    if containerRuntimeUp && networkConfigured {
         newCondition = api.NodeCondition{
             Type:              api.NodeReady,
             Status:            api.ConditionTrue,
-            Reason:            fmt.Sprintf("kubelet is posting ready status"),
+            Reason:            "kubelet is posting ready status",
             LastHeartbeatTime: currentTime,
         }
     } else {
+        var reasons []string
+        if !containerRuntimeUp {
+            reasons = append(reasons, "container runtime is down")
+        }
+        if !networkConfigured {
+            reasons = append(reasons, "network not configured correctly")
+        }
         newCondition = api.NodeCondition{
             Type:              api.NodeReady,
             Status:            api.ConditionFalse,
-            Reason:            fmt.Sprintf("container runtime is down"),
+            Reason:            strings.Join(reasons, ","),
             LastHeartbeatTime: currentTime,
         }
     }
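
As a usage illustration (a standalone sketch, not commit code), when both checks fail the joined Reason posted on the NodeReady condition reads as follows:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // Assume both checks failed (hypothetical values).
    containerRuntimeUp, networkConfigured := false, false

    // Mirror the reason-building logic above.
    var reasons []string
    if !containerRuntimeUp {
        reasons = append(reasons, "container runtime is down")
    }
    if !networkConfigured {
        reasons = append(reasons, "network not configured correctly")
    }
    // Prints: container runtime is down,network not configured correctly
    fmt.Println(strings.Join(reasons, ","))
}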