From 09285864db0d925b650488abb98cdea797eed09a Mon Sep 17 00:00:00 2001 From: Cesar Wong Date: Thu, 7 Apr 2016 13:43:52 -0400 Subject: [PATCH] Initial windows container runtime --- pkg/kubelet/cadvisor/cadvisor_stub.go | 74 +++ pkg/kubelet/cm/container_manager_stub.go | 4 +- .../cm/container_manager_unsupported.go | 64 --- pkg/kubelet/dockertools/docker_manager.go | 440 ++++++++++++++++-- pkg/volume/volume.go | 57 ++- 5 files changed, 541 insertions(+), 98 deletions(-) create mode 100644 pkg/kubelet/cadvisor/cadvisor_stub.go delete mode 100644 pkg/kubelet/cm/container_manager_unsupported.go diff --git a/pkg/kubelet/cadvisor/cadvisor_stub.go b/pkg/kubelet/cadvisor/cadvisor_stub.go new file mode 100644 index 00000000000..f2cb6e3e73f --- /dev/null +++ b/pkg/kubelet/cadvisor/cadvisor_stub.go @@ -0,0 +1,74 @@ +// +build darwin windows + +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cadvisor + +import ( + "github.com/google/cadvisor/events" + cadvisorapi "github.com/google/cadvisor/info/v1" + cadvisorapiv2 "github.com/google/cadvisor/info/v2" +) + +type cadvisorStub struct { +} + +var _ Interface = new(cadvisorStub) + +func New(port uint, runtime string) (Interface, error) { + return &cadvisorStub{}, nil +} + +func (cu *cadvisorStub) Start() error { + return nil +} + +func (cu *cadvisorStub) DockerContainer(name string, req *cadvisorapi.ContainerInfoRequest) (cadvisorapi.ContainerInfo, error) { + return cadvisorapi.ContainerInfo{}, nil +} + +func (cu *cadvisorStub) ContainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (*cadvisorapi.ContainerInfo, error) { + return &cadvisorapi.ContainerInfo{}, nil +} + +func (cu *cadvisorStub) ContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) { + return make(map[string]cadvisorapiv2.ContainerInfo), nil +} + +func (cu *cadvisorStub) SubcontainerInfo(name string, req *cadvisorapi.ContainerInfoRequest) (map[string]*cadvisorapi.ContainerInfo, error) { + return nil, nil +} + +func (cu *cadvisorStub) MachineInfo() (*cadvisorapi.MachineInfo, error) { + return &cadvisorapi.MachineInfo{}, nil +} + +func (cu *cadvisorStub) VersionInfo() (*cadvisorapi.VersionInfo, error) { + return &cadvisorapi.VersionInfo{}, nil +} + +func (cu *cadvisorStub) ImagesFsInfo() (cadvisorapiv2.FsInfo, error) { + return cadvisorapiv2.FsInfo{}, nil +} + +func (cu *cadvisorStub) RootFsInfo() (cadvisorapiv2.FsInfo, error) { + return cadvisorapiv2.FsInfo{}, nil +} + +func (cu *cadvisorStub) WatchEvents(request *events.Request) (*events.EventChannel, error) { + return &events.EventChannel{}, nil +} diff --git a/pkg/kubelet/cm/container_manager_stub.go b/pkg/kubelet/cm/container_manager_stub.go index 186d773dbe9..94c71729253 100644 --- a/pkg/kubelet/cm/container_manager_stub.go +++ b/pkg/kubelet/cm/container_manager_stub.go @@ -1,3 +1,5 @@ +// +build !linux + /* Copyright 2015 The Kubernetes Authors. 
@@ -23,8 +25,6 @@ import ( type containerManagerStub struct{} -var _ ContainerManager = &containerManagerStub{} - func (cm *containerManagerStub) Start(_ *api.Node) error { glog.V(2).Infof("Starting stub container manager") return nil diff --git a/pkg/kubelet/cm/container_manager_unsupported.go b/pkg/kubelet/cm/container_manager_unsupported.go deleted file mode 100644 index 4118a998f8c..00000000000 --- a/pkg/kubelet/cm/container_manager_unsupported.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build !linux - -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cm - -import ( - "fmt" - - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/kubelet/cadvisor" - "k8s.io/kubernetes/pkg/util/mount" -) - -type unsupportedContainerManager struct { -} - -var _ ContainerManager = &unsupportedContainerManager{} - -func (unsupportedContainerManager) Start(_ *api.Node) error { - return fmt.Errorf("Container Manager is unsupported in this build") -} - -func (unsupportedContainerManager) SystemCgroupsLimit() api.ResourceList { - return api.ResourceList{} -} - -func (unsupportedContainerManager) GetNodeConfig() NodeConfig { - return NodeConfig{} -} - -func (unsupportedContainerManager) GetMountedSubsystems() *CgroupSubsystems { - return &CgroupSubsystems{} -} - -func (unsupportedContainerManager) GetQOSContainersInfo() QOSContainersInfo { - return QOSContainersInfo{} -} - -func (cm *unsupportedContainerManager) Status() Status { - return Status{} -} - -func (cm *unsupportedContainerManager) NewPodContainerManager() PodContainerManager { - return &unsupportedPodContainerManager{} -} - -func NewContainerManager(_ mount.Interface, _ cadvisor.Interface, _ NodeConfig) (ContainerManager, error) { - return &unsupportedContainerManager{}, nil -} diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go index 90ece063cc6..b5083f50ce7 100644 --- a/pkg/kubelet/dockertools/docker_manager.go +++ b/pkg/kubelet/dockertools/docker_manager.go @@ -115,6 +115,10 @@ var ( defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined", ""}} ) +type WindowsDockerManager struct { + *DockerManager +} + type DockerManager struct { client DockerInterface recorder record.EventRecorder @@ -202,6 +206,71 @@ func PodInfraContainerEnv(env map[string]string) kubecontainer.Option { } } +type windowsRuntimeHelper struct { + kubecontainer.RuntimeHelper +} + +func (h *windowsRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) { + return &kubecontainer.RunContainerOptions{ + Hostname: "test-pod", + }, nil +} + +func NewWindowsDockerManager( + client DockerInterface, + recorder record.EventRecorder, + livenessManager proberesults.Manager, + containerRefManager *kubecontainer.RefManager, + podGetter podGetter, + machineInfo *cadvisorapi.MachineInfo, + podInfraContainerImage string, + qps float32, + burst int, + containerLogsDir string, + osInterface kubecontainer.OSInterface, + networkPlugin 
network.NetworkPlugin, + runtimeHelper kubecontainer.RuntimeHelper, + httpClient types.HttpGetter, + execHandler ExecHandler, + oomAdjuster *oom.OOMAdjuster, + procFs procfs.ProcFSInterface, + cpuCFSQuota bool, + imageBackOff *flowcontrol.Backoff, + serializeImagePulls bool, + enableCustomMetrics bool, + hairpinMode bool, + seccompProfileRoot string, + options ...kubecontainer.Option) *WindowsDockerManager { + dockerManager := NewDockerManager(client, + recorder, + livenessManager, + containerRefManager, + podGetter, + machineInfo, + podInfraContainerImage, + qps, + burst, + containerLogsDir, + osInterface, + networkPlugin, + runtimeHelper, + httpClient, + execHandler, + oomAdjuster, + procFs, + cpuCFSQuota, + imageBackOff, + serializeImagePulls, + enableCustomMetrics, + hairpinMode, + seccompProfileRoot, + options...) + + return &WindowsDockerManager{ + DockerManager: dockerManager, + } +} + func NewDockerManager( client DockerInterface, recorder record.EventRecorder, @@ -345,10 +414,13 @@ func (dm *DockerManager) determineContainerIP(podNamespace, podName string, cont if container.NetworkSettings != nil { result = container.NetworkSettings.IPAddress - - // Fall back to IPv6 address if no IPv4 address is present - if result == "" { - result = container.NetworkSettings.GlobalIPv6Address + if len(result) == 0 { + for _, network := range container.NetworkSettings.Networks { + if len(network.IPAddress) > 0 { + result = network.IPAddress + break + } + } } } @@ -431,13 +503,11 @@ func (dm *DockerManager) inspectContainer(id string, podName, podNamespace strin // Container that are running, restarting and paused status.State = kubecontainer.ContainerStateRunning status.StartedAt = startedAt - if containerName == PodInfraContainerName { - ip, err = dm.determineContainerIP(podNamespace, podName, iResult) - // Kubelet doesn't handle the network error scenario - if err != nil { - status.State = kubecontainer.ContainerStateUnknown - status.Message = fmt.Sprintf("Network error: %#v", err) - } + ip, err = dm.determineContainerIP(podNamespace, podName, iResult) + // Kubelet doesn't handle the network error scenario + if err != nil { + status.State = kubecontainer.ContainerStateUnknown + status.Message = fmt.Sprintf("Network error: %#v", err) } return &status, ip, nil } @@ -637,6 +707,7 @@ func (dm *DockerManager) runContainer( cpuShares = milliCPUToShares(cpuRequest.MilliValue()) } var devices []dockercontainer.DeviceMapping + _ = devices if nvidiaGPULimit.Value() != 0 { // Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for // (we only support one device per node). 
@@ -670,20 +741,23 @@ func (dm *DockerManager) runContainer( } } + _ = cpuShares + _ = memoryLimit + _ = securityOpts hc := &dockercontainer.HostConfig{ - Binds: binds, - NetworkMode: dockercontainer.NetworkMode(netMode), - IpcMode: dockercontainer.IpcMode(ipcMode), - UTSMode: dockercontainer.UTSMode(utsMode), - PidMode: dockercontainer.PidMode(pidMode), - ReadonlyRootfs: readOnlyRootFilesystem(container), - Resources: dockercontainer.Resources{ - Memory: memoryLimit, - MemorySwap: -1, - CPUShares: cpuShares, - Devices: devices, - }, - SecurityOpt: fmtSecurityOpts, + // Binds: binds, + NetworkMode: dockercontainer.NetworkMode(netMode), + // IpcMode: dockercontainer.IpcMode(ipcMode), + // UTSMode: dockercontainer.UTSMode(utsMode), + // PidMode: dockercontainer.PidMode(pidMode), + // ReadonlyRootfs: readOnlyRootFilesystem(container), + // Resources: dockercontainer.Resources{ + // Memory: memoryLimit, + // MemorySwap: -1, + // CPUShares: cpuShares, + // Devices: devices, + // }, + // SecurityOpt: securityOpts, } // Set sysctls if requested @@ -746,10 +820,10 @@ func (dm *DockerManager) runContainer( glog.V(3).Infof("Container %v/%v/%v: setting entrypoint \"%v\" and command \"%v\"", pod.Namespace, pod.Name, container.Name, dockerOpts.Config.Entrypoint, dockerOpts.Config.Cmd) - supplementalGids := dm.runtimeHelper.GetExtraSupplementalGroupsForPod(pod) - securityContextProvider := securitycontext.NewSimpleSecurityContextProvider() - securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config) - securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig, supplementalGids) + // supplementalGids := dm.runtimeHelper.GetExtraSupplementalGroupsForPod(pod) + // securityContextProvider := securitycontext.NewSimpleSecurityContextProvider() + // securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config) + // securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig, supplementalGids) createResp, err := dm.client.CreateContainer(dockerOpts) if err != nil { dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err) @@ -925,6 +999,10 @@ func (dm *DockerManager) PullImage(image kubecontainer.ImageSpec, secrets []api. return dm.dockerPuller.Pull(image.Image, secrets) } +func (dm *WindowsDockerManager) PullImage(image kubecontainer.ImageSpec, secrets []api.Secret) error { + return fmt.Errorf("Image pull not yet supported") +} + // IsImagePresent checks whether the container image is already in the local storage. func (dm *DockerManager) IsImagePresent(image kubecontainer.ImageSpec) (bool, error) { return dm.dockerPuller.IsImagePresent(image.Image) @@ -1743,9 +1821,9 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe // Check if current docker version is higher than 1.10. Otherwise, we have to apply OOMScoreAdj instead of using docker API. // TODO: Remove this logic after we stop supporting docker version < 1.10. - if err = dm.applyOOMScoreAdjIfNeeded(pod, container, containerInfo); err != nil { - return kubecontainer.ContainerID{}, err - } + // if err = dm.applyOOMScoreAdjIfNeeded(pod, container, containerInfo); err != nil { + // return kubecontainer.ContainerID{}, err + // } // The addNDotsOption call appends the ndots option to the resolv.conf file generated by docker. 
// This resolv.conf file is shared by all containers of the same pod, and needs to be modified only once per pod. @@ -2070,6 +2148,116 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub }, nil } +func (dm *WindowsDockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) { + start := time.Now() + defer func() { + metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start)) + }() + glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod) + + containersToStart := make(map[int]string) + containersToKeep := make(map[kubecontainer.DockerID]int) + + // var err error + + // Skip podInfraContainer checking and creation + // var podInfraContainerID kubecontainer.DockerID + // var changed bool + // podInfraContainerStatus := podStatus.FindContainerStatusByName(PodInfraContainerName) + // if podInfraContainerStatus != nil && podInfraContainerStatus.State == kubecontainer.ContainerStateRunning { + // glog.V(4).Infof("Found pod infra container for %q", format.Pod(pod)) + // changed, err = dm.podInfraContainerChanged(pod, podInfraContainerStatus) + // if err != nil { + // return podContainerChangesSpec{}, err + // } + // } + + // createPodInfraContainer := true + // if podInfraContainerStatus == nil || podInfraContainerStatus.State != kubecontainer.ContainerStateRunning { + // glog.V(2).Infof("Need to restart pod infra container for %q because it is not found", format.Pod(pod)) + // } else if changed { + // glog.V(2).Infof("Need to restart pod infra container for %q because it is changed", format.Pod(pod)) + // } else { + // glog.V(4).Infof("Pod infra container looks good, keep it %q", format.Pod(pod)) + // createPodInfraContainer = false + // podInfraContainerID = kubecontainer.DockerID(podInfraContainerStatus.ID.ID) + // containersToKeep[podInfraContainerID] = -1 + // } + + for index, container := range pod.Spec.Containers { + expectedHash := kubecontainer.HashContainer(&container) + + containerStatus := podStatus.FindContainerStatusByName(container.Name) + if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning { + if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) { + // If we are here it means that the container is dead and should be restarted, or never existed and should + // be created. We may be inserting this ID again if the container has changed and it has + // RestartPolicy::Always, but it's not a big deal. + message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container) + glog.V(3).Info(message) + containersToStart[index] = message + } + continue + } + + containerID := kubecontainer.DockerID(containerStatus.ID.ID) + hash := containerStatus.Hash + glog.V(3).Infof("pod %q container %q exists as %v", format.Pod(pod), container.Name, containerID) + + // if createPodInfraContainer { + // // createPodInfraContainer == true and Container exists + // // If we're creating infra container everything will be killed anyway + // // If RestartPolicy is Always or OnFailure we restart containers that were running before we + // // killed them when restarting Infra Container. + // if pod.Spec.RestartPolicy != api.RestartPolicyNever { + // message := fmt.Sprintf("Infra Container is being recreated. 
%q will be restarted.", container.Name) + // glog.V(1).Info(message) + // containersToStart[index] = message + // } + // continue + // } + + // At this point, the container is running and pod infra container is good. + // We will look for changes and check healthiness for the container. + containerChanged := hash != 0 && hash != expectedHash + if containerChanged { + message := fmt.Sprintf("pod %q container %q hash changed (%d vs %d), it will be killed and re-created.", format.Pod(pod), container.Name, hash, expectedHash) + glog.Info(message) + containersToStart[index] = message + continue + } + + liveness, found := dm.livenessManager.Get(containerStatus.ID) + if !found || liveness == proberesults.Success { + containersToKeep[containerID] = index + continue + } + if pod.Spec.RestartPolicy != api.RestartPolicyNever { + message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name) + glog.Info(message) + containersToStart[index] = message + } + } + + // After the loop one of the following should be true: + // - createPodInfraContainer is true and containersToKeep is empty. + // (In fact, when createPodInfraContainer is false, containersToKeep will not be touched). + // - createPodInfraContainer is false and containersToKeep contains at least ID of Infra Container + + // If Infra container is the last running one, we don't want to keep it. + // if !createPodInfraContainer && len(containersToStart) == 0 && len(containersToKeep) == 1 { + // containersToKeep = make(map[kubecontainer.DockerID]int) + // } + + return podContainerChangesSpec{ + StartInfraContainer: false, + InfraChanged: false, + InfraContainerId: "", + ContainersToStart: containersToStart, + ContainersToKeep: containersToKeep, + }, nil +} + // Sync the running pod to match the specified desired pod. func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) { start := time.Now() @@ -2412,6 +2600,196 @@ func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) ( return &pod.Spec.InitContainers[0], nil, false } +// Sync the running pod to match the specified desired pod. 
+func (dm *WindowsDockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
+	start := time.Now()
+	defer func() {
+		metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start))
+	}()
+
+	containerChanges, err := dm.computePodContainerChanges(pod, podStatus)
+	if err != nil {
+		result.Fail(err)
+		return
+	}
+	glog.V(3).Infof("Got container changes for pod %q: %+v", format.Pod(pod), containerChanges)
+
+	// if containerChanges.InfraChanged {
+	// 	ref, err := api.GetReference(pod)
+	// 	if err != nil {
+	// 		glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
+	// 	}
+	// 	dm.recorder.Eventf(ref, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
+	// }
+	// if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
+	// 	if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
+	// 		glog.V(4).Infof("Killing Infra Container for %q because all other containers are dead.", format.Pod(pod))
+	// 	} else {
+	// 		glog.V(4).Infof("Killing Infra Container for %q, will start new one", format.Pod(pod))
+	// 	}
+
+	// 	// Killing phase: if we want to start new infra container, or nothing is running kill everything (including infra container)
+	// 	// TODO(random-liu): We'll use pod status directly in the future
+	// 	killResult := dm.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(podStatus))
+	// 	result.AddPodSyncResult(killResult)
+	// 	if killResult.Error() != nil {
+	// 		return
+	// 	}
+	// } else {
+	// Otherwise kill any running containers in this pod which are not specified as ones to keep.
+	runningContainerStatuses := podStatus.GetRunningContainerStatuses()
+	for _, containerStatus := range runningContainerStatuses {
+		_, keep := containerChanges.ContainersToKeep[kubecontainer.DockerID(containerStatus.ID.ID)]
+		if !keep {
+			glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod))
+			// attempt to find the appropriate container policy
+			var podContainer *api.Container
+			var killMessage string
+			for i, c := range pod.Spec.Containers {
+				if c.Name == containerStatus.Name {
+					podContainer = &pod.Spec.Containers[i]
+					killMessage = containerChanges.ContainersToStart[i]
+					break
+				}
+			}
+			killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerStatus.Name)
+			result.AddSyncResult(killContainerResult)
+			if err := dm.KillContainerInPod(containerStatus.ID, podContainer, pod, killMessage, nil); err != nil {
+				killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
+				glog.Errorf("Error killing container %q(id=%q) for pod %q: %v", containerStatus.Name, containerStatus.ID, format.Pod(pod), err)
+				return
+			}
+		}
+	}
+	// }
+
+	// We pass the value of the podIP down to runContainerInPod, which in turn
+	// passes it to various other functions, in order to facilitate
+	// functionality that requires this value (hosts file and downward API)
+	// and avoid races determining the pod IP in cases where a container
+	// requires restart but the podIP isn't in the status manager yet.
+	//
+	// We default to the IP in the passed-in pod status, and overwrite it if the
+	// infra container needs to be (re)started.
+ podIP := "" + if podStatus != nil { + podIP = podStatus.IP + } + + // If we should create infra container then we do it first. + // podInfraContainerID := containerChanges.InfraContainerId + // if containerChanges.StartInfraContainer && (len(containerChanges.ContainersToStart) > 0) { + // glog.V(4).Infof("Creating pod infra container for %q", format.Pod(pod)) + // startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, PodInfraContainerName) + // result.AddSyncResult(startContainerResult) + // var msg string + // podInfraContainerID, err, msg = dm.createPodInfraContainer(pod) + // if err != nil { + // startContainerResult.Fail(err, msg) + // glog.Errorf("Failed to create pod infra container: %v; Skipping pod %q", err, format.Pod(pod)) + // return + // } + + // setupNetworkResult := kubecontainer.NewSyncResult(kubecontainer.SetupNetwork, kubecontainer.GetPodFullName(pod)) + // result.AddSyncResult(setupNetworkResult) + // if !usesHostNetwork(pod) { + // // Call the networking plugin + // err = dm.networkPlugin.SetUpPod(pod.Namespace, pod.Name, podInfraContainerID) + // if err != nil { + // // TODO: (random-liu) There shouldn't be "Skipping pod" in sync result message + // message := fmt.Sprintf("Failed to setup network for pod %q using network plugins %q: %v; Skipping pod", format.Pod(pod), dm.networkPlugin.Name(), err) + // setupNetworkResult.Fail(kubecontainer.ErrSetupNetwork, message) + // glog.Error(message) + + // // Delete infra container + // killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, PodInfraContainerName) + // result.AddSyncResult(killContainerResult) + // if delErr := dm.KillContainerInPod(kubecontainer.ContainerID{ + // ID: string(podInfraContainerID), + // Type: "docker"}, nil, pod, message); delErr != nil { + // killContainerResult.Fail(kubecontainer.ErrKillContainer, delErr.Error()) + // glog.Warningf("Clear infra container failed for pod %q: %v", format.Pod(pod), delErr) + // } + // return + // } + + // // Setup the host interface unless the pod is on the host's network (FIXME: move to networkPlugin when ready) + // var podInfraContainer *docker.Container + // podInfraContainer, err = dm.client.InspectContainer(string(podInfraContainerID)) + // if err != nil { + // glog.Errorf("Failed to inspect pod infra container: %v; Skipping pod %q", err, format.Pod(pod)) + // result.Fail(err) + // return + // } + + // if dm.configureHairpinMode { + // if err = hairpin.SetUpContainer(podInfraContainer.State.Pid, network.DefaultInterfaceName); err != nil { + // glog.Warningf("Hairpin setup failed for pod %q: %v", format.Pod(pod), err) + // } + // } + + // // Overwrite the podIP passed in the pod status, since we just started the infra container. 
+ // podIP = dm.determineContainerIP(pod.Name, pod.Namespace, podInfraContainer) + // } + // } + + // Start everything + for idx := range containerChanges.ContainersToStart { + container := &pod.Spec.Containers[idx] + startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name) + result.AddSyncResult(startContainerResult) + + // containerChanges.StartInfraContainer causes the containers to be restarted for config reasons + // ignore backoff + if !containerChanges.StartInfraContainer { + isInBackOff, err, msg := dm.doBackOff(pod, container, podStatus, backOff) + if isInBackOff { + startContainerResult.Fail(err, msg) + glog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod)) + continue + } + } + glog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod)) + // err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets) + // if err != nil { + // startContainerResult.Fail(err, msg) + // continue + // } + + // if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot { + // err := dm.verifyNonRoot(container) + // if err != nil { + // startContainerResult.Fail(kubecontainer.ErrVerifyNonRoot, err.Error()) + // glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err) + // continue + // } + // } + // For a new container, the RestartCount should be 0 + restartCount := 0 + containerStatus := podStatus.FindContainerStatusByName(container.Name) + if containerStatus != nil { + restartCount = containerStatus.RestartCount + 1 + } + + // TODO(dawnchen): Check RestartPolicy.DelaySeconds before restart a container + // Note: when configuring the pod's containers anything that can be configured by pointing + // to the namespace of the infra container should use namespaceMode. This includes things like the net namespace + // and IPC namespace. PID mode cannot point to another container right now. + // See createPodInfraContainer for infra container setup. + // namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID) + namespaceMode := "" + _, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), podIP, restartCount) + if err != nil { + startContainerResult.Fail(kubecontainer.ErrRunContainer, err.Error()) + // TODO(bburns) : Perhaps blacklist a container after N failures? + glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err) + continue + } + // Successfully started the container; clear the entry in the failure + } + return +} + // verifyNonRoot returns an error if the container or image will run as the root user. func (dm *DockerManager) verifyNonRoot(container *api.Container) error { if securitycontext.HasRunAsUser(container) { diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go index 54081c54534..93db6169cb9 100644 --- a/pkg/volume/volume.go +++ b/pkg/volume/volume.go @@ -17,6 +17,7 @@ limitations under the License. 
 package volume
 
 import (
+	"io"
 	"io/ioutil"
 	"os"
 	"path"
@@ -213,9 +214,63 @@ func RenameDirectory(oldPath, newName string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-	err = os.Rename(oldPath, newPath)
+	// os.Rename fails on Windows (https://github.com/golang/go/issues/14527),
+	// so copy the directory tree to newPath with copyFolder and delete oldPath instead.
+	// err = os.Rename(oldPath, newPath)
+	err = copyFolder(oldPath, newPath)
+
 	if err != nil {
 		return "", err
 	}
+	os.RemoveAll(oldPath)
 	return newPath, nil
 }
+
+// copyFolder recursively copies the contents of source into dest,
+// creating dest if it does not already exist.
+func copyFolder(source string, dest string) (err error) {
+	sourceinfo, err := os.Stat(source)
+	if err != nil {
+		return err
+	}
+	if err = os.MkdirAll(dest, sourceinfo.Mode()); err != nil {
+		return err
+	}
+
+	directory, err := os.Open(source)
+	if err != nil {
+		return err
+	}
+	defer directory.Close()
+
+	objects, err := directory.Readdir(-1)
+	if err != nil {
+		return err
+	}
+
+	for _, obj := range objects {
+		sourcefilepointer := source + "/" + obj.Name()
+		destinationfilepointer := dest + "/" + obj.Name()
+		if obj.IsDir() {
+			err = copyFolder(sourcefilepointer, destinationfilepointer)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = copyFile(sourcefilepointer, destinationfilepointer)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return
+}
+
+// copyFile copies a single file and preserves its file mode.
+func copyFile(source string, dest string) (err error) {
+	sourcefile, err := os.Open(source)
+	if err != nil {
+		return err
+	}
+	defer sourcefile.Close()
+
+	destfile, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	defer destfile.Close()
+
+	_, err = io.Copy(destfile, sourcefile)
+	if err != nil {
+		return err
+	}
+
+	sourceinfo, err := os.Stat(source)
+	if err != nil {
+		return err
+	}
+	return os.Chmod(dest, sourceinfo.Mode())
+}
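
For reviewers, a minimal test-style sketch of the copy-then-delete fallback used by RenameDirectory above (not part of the diff). The test name, its placement next to pkg/volume/volume.go, and the temporary-directory layout are illustrative assumptions; only RenameDirectory itself comes from this change.

// volume_rename_sketch_test.go (hypothetical file name)
package volume

import (
	"io/ioutil"
	"os"
	"path"
	"testing"
)

// TestRenameDirectoryByCopy checks the copy-then-delete behaviour:
// after the call, the file should be readable under the returned path
// and the original directory should have been removed.
func TestRenameDirectoryByCopy(t *testing.T) {
	base, err := ioutil.TempDir("", "rename-test")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(base)

	oldPath := path.Join(base, "src")
	if err := os.Mkdir(oldPath, 0755); err != nil {
		t.Fatal(err)
	}
	if err := ioutil.WriteFile(path.Join(oldPath, "data.txt"), []byte("hello"), 0644); err != nil {
		t.Fatal(err)
	}

	newPath, err := RenameDirectory(oldPath, "dst")
	if err != nil {
		t.Fatalf("RenameDirectory failed: %v", err)
	}

	contents, err := ioutil.ReadFile(path.Join(newPath, "data.txt"))
	if err != nil || string(contents) != "hello" {
		t.Fatalf("copied file missing or wrong: %v, %q", err, contents)
	}
	if _, err := os.Stat(oldPath); !os.IsNotExist(err) {
		t.Fatalf("expected %s to be removed, stat returned: %v", oldPath, err)
	}
}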