cmd/kubelet

Chao Xu 2016-11-18 12:50:58 -08:00
parent 7eeb71f698
commit 5e1adf91df
178 changed files with 3685 additions and 3560 deletions

View File

@ -26,9 +26,9 @@ import (
"k8s.io/kubernetes/pkg/auth/authenticator/bearertoken"
"k8s.io/kubernetes/pkg/auth/authorizer"
"k8s.io/kubernetes/pkg/auth/group"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
authenticationclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authentication/internalversion"
authorizationclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/authorization/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
authenticationclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/authentication/v1beta1"
authorizationclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/authorization/v1beta1"
alwaysallowauthorizer "k8s.io/kubernetes/pkg/genericapiserver/authorizer"
"k8s.io/kubernetes/pkg/kubelet/server"
"k8s.io/kubernetes/pkg/types"
@ -40,7 +40,7 @@ import (
webhooksar "k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook"
)
func buildAuth(nodeName types.NodeName, client internalclientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) {
func buildAuth(nodeName types.NodeName, client clientset.Interface, config componentconfig.KubeletConfiguration) (server.AuthInterface, error) {
// Get clients, if provided
var (
tokenClient authenticationclient.TokenReviewInterface
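For reference, a minimal sketch of how a caller might build the versioned clientset that buildAuth now expects; kubeClientConfig, nodeName and kubeCfg are placeholders for values the kubelet already holds, not part of this diff.

// Sketch only: construct the release_1_5 clientset from a rest config and pass it to buildAuth.
kubeClient, err := clientset.NewForConfig(kubeClientConfig)
if err != nil {
	return nil, err
}
auth, err := buildAuth(nodeName, kubeClient, kubeCfg)
if err != nil {
	return nil, err
}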

View File

@ -25,7 +25,7 @@ import (
"github.com/golang/glog"
unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion"
unversionedcertificates "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"

View File

@ -38,12 +38,13 @@ import (
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/chaosclient"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
clientauth "k8s.io/kubernetes/pkg/client/unversioned/auth"
@ -170,7 +171,7 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD
return "", err
}
configmap, err := func() (*api.ConfigMap, error) {
configmap, err := func() (*v1.ConfigMap, error) {
var nodename types.NodeName
hostname := nodeutil.GetHostname(s.HostnameOverride)
@ -186,14 +187,14 @@ func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletD
return nil, err
}
// look for kubelet-<node-name> configmap from "kube-system"
configmap, err := kubeClient.CoreClient.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename))
configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename))
if err != nil {
return nil, err
}
return configmap, nil
}
// No cloud provider yet, so can't get the nodename via Cloud.Instances().CurrentNodeName(hostname), try just using the hostname
configmap, err := kubeClient.CoreClient.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname))
configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname))
if err != nil {
return nil, fmt.Errorf("cloud provider was nil, and attempt to use hostname to find config resulted in: %v", err)
}
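The lookup pattern above, condensed into a minimal standalone sketch (nodename is assumed to be resolved already; the error message is illustrative):

// Sketch only: fetch the per-node kubelet configmap (now a *v1.ConfigMap) via the versioned core client.
name := fmt.Sprintf("kubelet-%s", nodename)
configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(name)
if err != nil {
	return nil, fmt.Errorf("could not fetch configmap %q: %v", name, err)
}
glog.V(4).Infof("loaded kubelet config from configmap %s/%s", configmap.Namespace, configmap.Name)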
@ -660,11 +661,11 @@ func RunKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet
}
eventBroadcaster := record.NewBroadcaster()
kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: string(nodeName)})
kubeDeps.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "kubelet", Host: string(nodeName)})
eventBroadcaster.StartLogging(glog.V(3).Infof)
if kubeDeps.EventClient != nil {
glog.V(4).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")})
} else {
glog.Warning("No api server defined - no events will be sent to API server.")
}
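Once the broadcaster is wired to the v1 event sink, callers emit events with the versioned constants; a minimal sketch (pod and containerName are placeholders, not part of this diff):

// Sketch only: record events against a pod using the v1 event type constants.
kubeDeps.Recorder.Eventf(pod, v1.EventTypeNormal, "Pulled", "Successfully pulled image for container %s", containerName)
kubeDeps.Recorder.Event(pod, v1.EventTypeWarning, "FailedSync", "Error syncing pod")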

View File

@ -20,7 +20,7 @@ import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/status"
@ -61,22 +61,22 @@ func newActiveDeadlineHandler(
}
// ShouldSync returns true if the pod is past its active deadline.
func (m *activeDeadlineHandler) ShouldSync(pod *api.Pod) bool {
func (m *activeDeadlineHandler) ShouldSync(pod *v1.Pod) bool {
return m.pastActiveDeadline(pod)
}
// ShouldEvict returns true if the pod is past its active deadline.
// It dispatches an event that the pod should be evicted if it is past its deadline.
func (m *activeDeadlineHandler) ShouldEvict(pod *api.Pod) lifecycle.ShouldEvictResponse {
func (m *activeDeadlineHandler) ShouldEvict(pod *v1.Pod) lifecycle.ShouldEvictResponse {
if !m.pastActiveDeadline(pod) {
return lifecycle.ShouldEvictResponse{Evict: false}
}
m.recorder.Eventf(pod, api.EventTypeNormal, reason, message)
m.recorder.Eventf(pod, v1.EventTypeNormal, reason, message)
return lifecycle.ShouldEvictResponse{Evict: true, Reason: reason, Message: message}
}
// pastActiveDeadline returns true if the pod has been active for more than its ActiveDeadlineSeconds
func (m *activeDeadlineHandler) pastActiveDeadline(pod *api.Pod) bool {
func (m *activeDeadlineHandler) pastActiveDeadline(pod *v1.Pod) bool {
// no active deadline was specified
if pod.Spec.ActiveDeadlineSeconds == nil {
return false
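The rest of the check is cut off in this hunk; a minimal standalone sketch of the deadline comparison with v1 types, assuming the pod status exposes StartTime and a util/clock Clock is available:

// Sketch only: true once the pod has been active longer than its ActiveDeadlineSeconds.
func pastDeadline(pod *v1.Pod, status v1.PodStatus, c clock.Clock) bool {
	if pod.Spec.ActiveDeadlineSeconds == nil || status.StartTime == nil {
		return false
	}
	allowed := time.Duration(*pod.Spec.ActiveDeadlineSeconds) * time.Second
	return c.Since(status.StartTime.Time) >= allowed
}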

View File

@ -20,8 +20,8 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/clock"
@ -29,17 +29,17 @@ import (
// mockPodStatusProvider returns the status on the specified pod
type mockPodStatusProvider struct {
pods []*api.Pod
pods []*v1.Pod
}
// GetPodStatus returns the status on the associated pod with matching uid (if found)
func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (api.PodStatus, bool) {
func (m *mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
for _, pod := range m.pods {
if pod.UID == uid {
return pod.Status, true
}
}
return api.PodStatus{}, false
return v1.PodStatus{}, false
}
// TestActiveDeadlineHandler verifies the active deadline handler functions as expected.
@ -71,7 +71,7 @@ func TestActiveDeadlineHandler(t *testing.T) {
pods[2].Spec.ActiveDeadlineSeconds = nil
testCases := []struct {
pod *api.Pod
pod *v1.Pod
expected bool
}{{pods[0], true}, {pods[1], false}, {pods[2], false}, {pods[3], false}}

View File

@ -15,14 +15,14 @@ limitations under the License.
*/
// Code generated by protoc-gen-gogo.
// source: api.proto
// source: v1.proto
// DO NOT EDIT!
/*
Package runtime is a generated protocol buffer package.
It is generated from these files:
api.proto
v1.proto
It has these top-level messages:
VersionRequest
@ -1028,7 +1028,7 @@ type PodSandboxFilter struct {
// State of the sandbox.
State *PodSandboxState `protobuf:"varint,2,opt,name=state,enum=runtime.PodSandboxState" json:"state,omitempty"`
// LabelSelector to select matches.
// Only api.MatchLabels is supported for now and the requirements
// Only v1.MatchLabels is supported for now and the requirements
// are ANDed. MatchExpressions is not supported yet.
LabelSelector map[string]string `protobuf:"bytes,3,rep,name=label_selector,json=labelSelector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
XXX_unrecognized []byte `json:"-"`
@ -1847,7 +1847,7 @@ type ContainerFilter struct {
// ID of the PodSandbox.
PodSandboxId *string `protobuf:"bytes,3,opt,name=pod_sandbox_id,json=podSandboxId" json:"pod_sandbox_id,omitempty"`
// LabelSelector to select matches.
// Only api.MatchLabels is supported for now and the requirements
// Only v1.MatchLabels is supported for now and the requirements
// are ANDed. MatchExpressions is not supported yet.
LabelSelector map[string]string `protobuf:"bytes,4,rep,name=label_selector,json=labelSelector" json:"label_selector,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
XXX_unrecognized []byte `json:"-"`

View File

@ -18,16 +18,16 @@ package cadvisor
import (
cadvisorApi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
)
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) api.ResourceList {
c := api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(
func CapacityFromMachineInfo(info *cadvisorApi.MachineInfo) v1.ResourceList {
c := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
int64(info.NumCores*1000),
resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(
v1.ResourceMemory: *resource.NewQuantity(
int64(info.MemoryCapacity),
resource.BinarySI),
}
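A short usage sketch of the converted helper, reading the quantities back out (machineInfo is a placeholder *cadvisorApi.MachineInfo):

// Sketch only: capacity is a v1.ResourceList keyed by the v1 resource names.
capacity := CapacityFromMachineInfo(machineInfo)
cpu := capacity[v1.ResourceCPU]
memory := capacity[v1.ResourceMemory]
glog.Infof("node capacity: %dm CPU, %d bytes memory", cpu.MilliValue(), memory.Value())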

View File

@ -23,6 +23,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport"
"k8s.io/kubernetes/pkg/types"
@ -102,13 +103,13 @@ func (c *KubeletClientConfig) transportConfig() *transport.Config {
// NodeGetter defines an interface for looking up a node by name
type NodeGetter interface {
Get(name string) (*api.Node, error)
Get(name string) (*v1.Node, error)
}
// NodeGetterFunc allows implementing NodeGetter with a function
type NodeGetterFunc func(name string) (*api.Node, error)
type NodeGetterFunc func(name string) (*v1.Node, error)
func (f NodeGetterFunc) Get(name string) (*api.Node, error) {
func (f NodeGetterFunc) Get(name string) (*v1.Node, error) {
return f(name)
}
@ -123,7 +124,7 @@ type NodeConnectionInfoGetter struct {
// transport is the transport to use to send a request to all kubelets
transport http.RoundTripper
// preferredAddressTypes specifies the preferred order to use to find a node address
preferredAddressTypes []api.NodeAddressType
preferredAddressTypes []v1.NodeAddressType
}
func NewNodeConnectionInfoGetter(nodes NodeGetter, config KubeletClientConfig) (ConnectionInfoGetter, error) {
@ -137,9 +138,9 @@ func NewNodeConnectionInfoGetter(nodes NodeGetter, config KubeletClientConfig) (
return nil, err
}
types := []api.NodeAddressType{}
types := []v1.NodeAddressType{}
for _, t := range config.PreferredAddressTypes {
types = append(types, api.NodeAddressType(t))
types = append(types, v1.NodeAddressType(t))
}
return &NodeConnectionInfoGetter{

View File

@ -19,13 +19,13 @@ package client
import (
"testing"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/restclient"
)
// Ensure a node client can be used as a NodeGetter.
// This allows anyone with a node client to easily construct a NewNodeConnectionInfoGetter.
var _ = NodeGetter(internalversion.NodeInterface(nil))
var _ = NodeGetter(v1core.NodeInterface(nil))
func TestMakeTransportInvalid(t *testing.T) {
config := &KubeletClientConfig{

View File

@ -16,20 +16,18 @@ limitations under the License.
package cm
import (
"k8s.io/kubernetes/pkg/api"
)
import "k8s.io/kubernetes/pkg/api/v1"
// Manages the containers running on a machine.
type ContainerManager interface {
// Runs the container manager's housekeeping.
// - Ensures that the Docker daemon is in a container.
// - Creates the system container where all non-containerized processes run.
Start(*api.Node) error
Start(*v1.Node) error
// Returns resources allocated to system cgroups in the machine.
// These cgroups include the system and Kubernetes services.
SystemCgroupsLimit() api.ResourceList
SystemCgroupsLimit() v1.ResourceList
// Returns a NodeConfig that is being used by the container manager.
GetNodeConfig() NodeConfig

View File

@ -34,8 +34,8 @@ import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
cmutil "k8s.io/kubernetes/pkg/kubelet/cm/util"
"k8s.io/kubernetes/pkg/kubelet/qos"
@ -104,7 +104,7 @@ type containerManagerImpl struct {
periodicTasks []func()
// holds all the mounted cgroup subsystems
subsystems *CgroupSubsystems
nodeInfo *api.Node
nodeInfo *v1.Node
}
type features struct {
@ -392,7 +392,7 @@ func (cm *containerManagerImpl) setupNode() error {
})
} else if cm.RuntimeCgroupsName != "" {
cont := newSystemCgroups(cm.RuntimeCgroupsName)
var capacity = api.ResourceList{}
var capacity = v1.ResourceList{}
if info, err := cm.cadvisorInterface.MachineInfo(); err == nil {
capacity = cadvisor.CapacityFromMachineInfo(info)
}
@ -523,7 +523,7 @@ func (cm *containerManagerImpl) Status() Status {
return cm.status
}
func (cm *containerManagerImpl) Start(node *api.Node) error {
func (cm *containerManagerImpl) Start(node *v1.Node) error {
// cache the node Info including resource capacity and
// allocatable of the node
cm.nodeInfo = node
@ -566,7 +566,7 @@ func (cm *containerManagerImpl) Start(node *api.Node) error {
return nil
}
func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList {
func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
cpuLimit := int64(0)
// Sum up resources of all external containers.
@ -574,8 +574,8 @@ func (cm *containerManagerImpl) SystemCgroupsLimit() api.ResourceList {
cpuLimit += cont.cpuMillicores
}
return api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(
return v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(
cpuLimit,
resource.DecimalSI),
}
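A hedged usage sketch of the container manager after the switch to v1 types (cm and node are placeholders, not part of this diff):

// Sketch only: start the container manager with a versioned node object and inspect system reservations.
if err := cm.Start(node); err != nil {
	glog.Fatalf("failed to start container manager: %v", err)
}
limits := cm.SystemCgroupsLimit()
cpu := limits[v1.ResourceCPU]
glog.Infof("system cgroups are limited to %dm CPU", cpu.MilliValue())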

View File

@ -18,20 +18,20 @@ package cm
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
type containerManagerStub struct{}
var _ ContainerManager = &containerManagerStub{}
func (cm *containerManagerStub) Start(_ *api.Node) error {
func (cm *containerManagerStub) Start(_ *v1.Node) error {
glog.V(2).Infof("Starting stub container manager")
return nil
}
func (cm *containerManagerStub) SystemCgroupsLimit() api.ResourceList {
return api.ResourceList{}
func (cm *containerManagerStub) SystemCgroupsLimit() v1.ResourceList {
return v1.ResourceList{}
}
func (cm *containerManagerStub) GetNodeConfig() NodeConfig {

View File

@ -21,7 +21,7 @@ package cm
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/util/mount"
)
@ -31,12 +31,12 @@ type unsupportedContainerManager struct {
var _ ContainerManager = &unsupportedContainerManager{}
func (unsupportedContainerManager) Start(_ *api.Node) error {
func (unsupportedContainerManager) Start(_ *v1.Node) error {
return fmt.Errorf("Container Manager is unsupported in this build")
}
func (unsupportedContainerManager) SystemCgroupsLimit() api.ResourceList {
return api.ResourceList{}
func (unsupportedContainerManager) SystemCgroupsLimit() v1.ResourceList {
return v1.ResourceList{}
}
func (unsupportedContainerManager) GetNodeConfig() NodeConfig {

View File

@ -21,7 +21,7 @@ package cm
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/util/mount"
)
@ -32,7 +32,7 @@ type containerManagerImpl struct {
var _ ContainerManager = &containerManagerImpl{}
func (cm *containerManagerImpl) Start(_ *api.Node) error {
func (cm *containerManagerImpl) Start(_ *v1.Node) error {
glog.V(2).Infof("Starting Windows stub container manager")
return nil
}

View File

@ -25,7 +25,7 @@ import (
libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/qos"
)
@ -83,7 +83,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
}
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
func ResourceConfigForPod(pod *api.Pod) *ResourceConfig {
func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
// sum requests and limits, track if limits were applied for each resource.
cpuRequests := int64(0)
cpuLimits := int64(0)
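The summation the comment describes, sketched as a hypothetical helper over the v1 types (only CPU requests are shown; limits and memory follow the same pattern):

// sumCPURequests is a hypothetical helper, not part of this diff.
func sumCPURequests(pod *v1.Pod) int64 {
	total := int64(0)
	for _, container := range pod.Spec.Containers {
		if q, ok := container.Resources.Requests[v1.ResourceCPU]; ok {
			total += q.MilliValue()
		}
	}
	return total
}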

View File

@ -22,26 +22,26 @@ import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
)
// getResourceList returns a ResourceList with the
// specified cpu and memory resource values
func getResourceList(cpu, memory string) api.ResourceList {
res := api.ResourceList{}
func getResourceList(cpu, memory string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[api.ResourceCPU] = resource.MustParse(cpu)
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[api.ResourceMemory] = resource.MustParse(memory)
res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}
// getResourceRequirements returns a ResourceRequirements object
func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
res := api.ResourceRequirements{}
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
@ -59,13 +59,13 @@ func TestResourceConfigForPod(t *testing.T) {
memoryQuantity = resource.MustParse("100Mi")
guaranteedMemory := memoryQuantity.Value()
testCases := map[string]struct {
pod *api.Pod
pod *v1.Pod
expected *ResourceConfig
}{
"besteffort": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
},
@ -75,9 +75,9 @@ func TestResourceConfigForPod(t *testing.T) {
expected: &ResourceConfig{CpuShares: &minShares},
},
"burstable-no-limits": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("", "")),
},
@ -87,9 +87,9 @@ func TestResourceConfigForPod(t *testing.T) {
expected: &ResourceConfig{CpuShares: &burstableShares},
},
"burstable-with-limits": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
@ -99,9 +99,9 @@ func TestResourceConfigForPod(t *testing.T) {
expected: &ResourceConfig{CpuShares: &burstableShares, CpuQuota: &burstableQuota, CpuPeriod: &burstablePeriod, Memory: &burstableMemory},
},
"burstable-partial-limits": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
},
@ -114,9 +114,9 @@ func TestResourceConfigForPod(t *testing.T) {
expected: &ResourceConfig{CpuShares: &burstablePartialShares},
},
"guaranteed": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
pod: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
},

View File

@ -18,7 +18,7 @@ limitations under the License.
package cm
import "k8s.io/kubernetes/pkg/api"
import "k8s.io/kubernetes/pkg/api/v1"
const (
MinShares = 0
@ -40,7 +40,7 @@ func MilliCPUToShares(milliCPU int64) int64 {
}
// ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
func ResourceConfigForPod(pod *api.Pod) *ResourceConfig {
func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
return nil
}

View File

@ -24,7 +24,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/types"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@ -39,7 +39,7 @@ const (
// management if qos Cgroup is enabled.
type podContainerManagerImpl struct {
// nodeInfo stores information about the node resource capacity
nodeInfo *api.Node
nodeInfo *v1.Node
// qosContainersInfo hold absolute paths of the top level qos containers
qosContainersInfo QOSContainersInfo
// Stores the mounted cgroup subsystems
@ -54,14 +54,14 @@ var _ PodContainerManager = &podContainerManagerImpl{}
// applyLimits sets pod cgroup resource limits
// It also updates the resource limits on top level qos containers.
func (m *podContainerManagerImpl) applyLimits(pod *api.Pod) error {
func (m *podContainerManagerImpl) applyLimits(pod *v1.Pod) error {
// This function will house the logic for setting the resource parameters
// on the pod container config and updating top level qos container configs
return nil
}
// Exists checks if the pod's cgroup already exists
func (m *podContainerManagerImpl) Exists(pod *api.Pod) bool {
func (m *podContainerManagerImpl) Exists(pod *v1.Pod) bool {
podContainerName, _ := m.GetPodContainerName(pod)
return m.cgroupManager.Exists(podContainerName)
}
@ -69,7 +69,7 @@ func (m *podContainerManagerImpl) Exists(pod *api.Pod) bool {
// EnsureExists takes a pod as argument and makes sure that
// pod cgroup exists if qos cgroup hierarchy flag is enabled.
// If the pod level container doesn't already exist it is created.
func (m *podContainerManagerImpl) EnsureExists(pod *api.Pod) error {
func (m *podContainerManagerImpl) EnsureExists(pod *v1.Pod) error {
podContainerName, _ := m.GetPodContainerName(pod)
// check if container already exist
alreadyExists := m.Exists(pod)
@ -94,7 +94,7 @@ func (m *podContainerManagerImpl) EnsureExists(pod *api.Pod) error {
}
// GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
func (m *podContainerManagerImpl) GetPodContainerName(pod *api.Pod) (CgroupName, string) {
func (m *podContainerManagerImpl) GetPodContainerName(pod *v1.Pod) (CgroupName, string) {
podQOS := qos.GetPodQOS(pod)
// Get the parent QOS container name
var parentContainer string
@ -233,19 +233,19 @@ type podContainerManagerNoop struct {
// Make sure that podContainerManagerStub implements the PodContainerManager interface
var _ PodContainerManager = &podContainerManagerNoop{}
func (m *podContainerManagerNoop) Exists(_ *api.Pod) bool {
func (m *podContainerManagerNoop) Exists(_ *v1.Pod) bool {
return true
}
func (m *podContainerManagerNoop) EnsureExists(_ *api.Pod) error {
func (m *podContainerManagerNoop) EnsureExists(_ *v1.Pod) error {
return nil
}
func (m *podContainerManagerNoop) GetPodContainerName(_ *api.Pod) (CgroupName, string) {
func (m *podContainerManagerNoop) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
return m.cgroupRoot, string(m.cgroupRoot)
}
func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *api.Pod) string {
func (m *podContainerManagerNoop) GetPodContainerNameForDriver(_ *v1.Pod) string {
return ""
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package cm
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
)
@ -26,15 +26,15 @@ type podContainerManagerStub struct {
var _ PodContainerManager = &podContainerManagerStub{}
func (m *podContainerManagerStub) Exists(_ *api.Pod) bool {
func (m *podContainerManagerStub) Exists(_ *v1.Pod) bool {
return true
}
func (m *podContainerManagerStub) EnsureExists(_ *api.Pod) error {
func (m *podContainerManagerStub) EnsureExists(_ *v1.Pod) error {
return nil
}
func (m *podContainerManagerStub) GetPodContainerName(_ *api.Pod) (CgroupName, string) {
func (m *podContainerManagerStub) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
return "", ""
}

View File

@ -19,7 +19,7 @@ limitations under the License.
package cm
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
)
@ -28,15 +28,15 @@ type unsupportedPodContainerManager struct {
var _ PodContainerManager = &unsupportedPodContainerManager{}
func (m *unsupportedPodContainerManager) Exists(_ *api.Pod) bool {
func (m *unsupportedPodContainerManager) Exists(_ *v1.Pod) bool {
return true
}
func (m *unsupportedPodContainerManager) EnsureExists(_ *api.Pod) error {
func (m *unsupportedPodContainerManager) EnsureExists(_ *v1.Pod) error {
return nil
}
func (m *unsupportedPodContainerManager) GetPodContainerName(_ *api.Pod) (CgroupName, string) {
func (m *unsupportedPodContainerManager) GetPodContainerName(_ *v1.Pod) (CgroupName, string) {
return "", ""
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package cm
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
)
@ -86,15 +86,15 @@ type QOSContainersInfo struct {
// containers for the pod.
type PodContainerManager interface {
// GetPodContainerName returns the CgroupName identifier, and its literal cgroupfs form on the host.
GetPodContainerName(*api.Pod) (CgroupName, string)
GetPodContainerName(*v1.Pod) (CgroupName, string)
// EnsureExists takes a pod as argument and makes sure that
// pod cgroup exists if qos cgroup hierarchy flag is enabled.
// If the pod cgroup doesn't already exist this method creates it.
EnsureExists(*api.Pod) error
EnsureExists(*v1.Pod) error
// Exists returns true if the pod cgroup exists.
Exists(*api.Pod) bool
Exists(*v1.Pod) bool
// Destroy takes a pod Cgroup name as argument and destroys the pod's container.
Destroy(name CgroupName) error
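A minimal usage sketch of this interface with v1 pods (pcm and pod are placeholders, not part of this diff):

// Sketch only: ensure the pod-level cgroup exists, then look up its name.
if !pcm.Exists(pod) {
	if err := pcm.EnsureExists(pod); err != nil {
		return fmt.Errorf("failed to create cgroup for pod %s: %v", pod.Name, err)
	}
}
cgroupName, cgroupfsName := pcm.GetPodContainerName(pod)
glog.V(4).Infof("pod %s is assigned cgroup %s (%s)", pod.Name, cgroupName, cgroupfsName)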

View File

@ -18,9 +18,12 @@ limitations under the License.
package config
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
@ -28,18 +31,22 @@ import (
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *clientset.Clientset, nodeName types.NodeName, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", api.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
lw := cache.NewListWatchFromClient(c.Core().RESTClient(), "pods", v1.NamespaceAll, fields.OneTermEqualSelector(api.PodHostField, string(nodeName)))
newSourceApiserverFromLW(lw, updates)
}
// newSourceApiserverFromLW creates a config source that watches and pulls from the apiserver.
func newSourceApiserverFromLW(lw cache.ListerWatcher, updates chan<- interface{}) {
send := func(objs []interface{}) {
var pods []*api.Pod
var pods []*v1.Pod
for _, o := range objs {
pods = append(pods, o.(*api.Pod))
pod := o.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(pod); err != nil {
glog.Error(err)
}
pods = append(pods, pod)
}
updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.ApiserverSource}
}
cache.NewReflector(lw, &api.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
cache.NewReflector(lw, &v1.Pod{}, cache.NewUndeltaStore(send, cache.MetaNamespaceKeyFunc), 0).Run()
}
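A hedged sketch of how the apiserver source is wired up with the versioned clientset (kubeClientConfig and nodeName are placeholders, not part of this diff):

// Sketch only: feed apiserver pod updates into the kubelet's config channel.
client, err := clientset.NewForConfig(kubeClientConfig)
if err != nil {
	glog.Fatalf("failed to build clientset: %v", err)
}
updates := make(chan interface{})
NewSourceApiserver(client, types.NodeName(nodeName), updates)
go func() {
	for u := range updates {
		podUpdate := u.(kubetypes.PodUpdate)
		glog.V(4).Infof("received %d pods from %s", len(podUpdate.Pods), podUpdate.Source)
	}
}()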

View File

@ -19,7 +19,7 @@ package config
import (
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime"
@ -31,31 +31,31 @@ type fakePodLW struct {
watchResp watch.Interface
}
func (lw fakePodLW) List(options api.ListOptions) (runtime.Object, error) {
func (lw fakePodLW) List(options v1.ListOptions) (runtime.Object, error) {
return lw.listResp, nil
}
func (lw fakePodLW) Watch(options api.ListOptions) (watch.Interface, error) {
func (lw fakePodLW) Watch(options v1.ListOptions) (watch.Interface, error) {
return lw.watchResp, nil
}
var _ cache.ListerWatcher = fakePodLW{}
func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
pod1v1 := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "p"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}}
pod1v2 := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "p"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}}
pod2 := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "q"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}
pod1v1 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/one"}}}}
pod1v2 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/two"}}}}
pod2 := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "q"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/blah"}}}}
// Setup fake api client.
fakeWatch := watch.NewFake()
lw := fakePodLW{
listResp: &api.PodList{Items: []api.Pod{*pod1v1}},
listResp: &v1.PodList{Items: []v1.Pod{*pod1v1}},
watchResp: fakeWatch,
}
@ -69,7 +69,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
}
update := got.(kubetypes.PodUpdate)
expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1)
if !api.Semantic.DeepEqual(expected, update) {
if !v1.Semantic.DeepEqual(expected, update) {
t.Errorf("Expected %#v; Got %#v", expected, update)
}
@ -84,7 +84,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
expectedA := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v1, pod2)
expectedB := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v1)
if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
if !v1.Semantic.DeepEqual(expectedA, update) && !v1.Semantic.DeepEqual(expectedB, update) {
t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
}
@ -98,7 +98,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
expectedA = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod1v2, pod2)
expectedB = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2, pod1v2)
if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
if !v1.Semantic.DeepEqual(expectedA, update) && !v1.Semantic.DeepEqual(expectedB, update) {
t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
}
@ -110,7 +110,7 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
}
update = got.(kubetypes.PodUpdate)
expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource, pod2)
if !api.Semantic.DeepEqual(expected, update) {
if !v1.Semantic.DeepEqual(expected, update) {
t.Errorf("Expected %#v, Got %#v", expected, update)
}
@ -122,23 +122,23 @@ func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
}
update = got.(kubetypes.PodUpdate)
expected = CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource)
if !api.Semantic.DeepEqual(expected, update) {
if !v1.Semantic.DeepEqual(expected, update) {
t.Errorf("Expected %#v, Got %#v", expected, update)
}
}
func TestNewSourceApiserver_TwoNamespacesSameName(t *testing.T) {
pod1 := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "one"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}}
pod2 := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "p", Namespace: "two"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}
pod1 := v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p", Namespace: "one"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/one"}}}}
pod2 := v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "p", Namespace: "two"},
Spec: v1.PodSpec{Containers: []v1.Container{{Image: "image/blah"}}}}
// Setup fake api client.
fakeWatch := watch.NewFake()
lw := fakePodLW{
listResp: &api.PodList{Items: []api.Pod{pod1, pod2}},
listResp: &v1.PodList{Items: []v1.Pod{pod1, pod2}},
watchResp: fakeWatch,
}
@ -172,7 +172,7 @@ func TestNewSourceApiserverInitialEmptySendsEmptyPodUpdate(t *testing.T) {
// Setup fake api client.
fakeWatch := watch.NewFake()
lw := fakePodLW{
listResp: &api.PodList{Items: []api.Pod{}},
listResp: &v1.PodList{Items: []v1.Pod{}},
watchResp: fakeWatch,
}
@ -186,7 +186,7 @@ func TestNewSourceApiserverInitialEmptySendsEmptyPodUpdate(t *testing.T) {
}
update := got.(kubetypes.PodUpdate)
expected := CreatePodUpdate(kubetypes.SET, kubetypes.ApiserverSource)
if !api.Semantic.DeepEqual(expected, update) {
if !v1.Semantic.DeepEqual(expected, update) {
t.Errorf("Expected %#v; Got %#v", expected, update)
}
}

View File

@ -23,6 +23,7 @@ import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apimachinery/registered"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@ -88,7 +89,7 @@ func getSelfLink(name, namespace string) string {
type defaultFunc func(pod *api.Pod) error
func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *api.Pod, err error) {
func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *v1.Pod, err error) {
// JSON is valid YAML, so this should work for everything.
json, err := utilyaml.ToJSON(data)
if err != nil {
@ -112,10 +113,14 @@ func tryDecodeSinglePod(data []byte, defaultFn defaultFunc) (parsed bool, pod *a
err = fmt.Errorf("invalid pod: %v", errs)
return true, pod, err
}
return true, newPod, nil
v1Pod := &v1.Pod{}
if err := v1.Convert_api_Pod_To_v1_Pod(newPod, v1Pod, nil); err != nil {
return true, nil, err
}
return true, v1Pod, nil
}
func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api.PodList, err error) {
func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods v1.PodList, err error) {
obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data)
if err != nil {
return false, pods, err
@ -137,5 +142,9 @@ func tryDecodePodList(data []byte, defaultFn defaultFunc) (parsed bool, pods api
return true, pods, err
}
}
return true, *newPods, err
v1Pods := &v1.PodList{}
if err := v1.Convert_api_PodList_To_v1_PodList(newPods, v1Pods, nil); err != nil {
return true, pods, err
}
return true, *v1Pods, err
}
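The decode-then-convert pattern above, condensed into a hypothetical helper (the function name is illustrative, not part of this diff):

// decodeV1Pod decodes a single-pod manifest and converts it to the versioned type.
func decodeV1Pod(data []byte) (*v1.Pod, error) {
	obj, err := runtime.Decode(api.Codecs.UniversalDecoder(), data)
	if err != nil {
		return nil, err
	}
	internalPod, ok := obj.(*api.Pod)
	if !ok {
		return nil, fmt.Errorf("unexpected object type %T", obj)
	}
	v1Pod := &v1.Pod{}
	if err := v1.Convert_api_Pod_To_v1_Pod(internalPod, v1Pod, nil); err != nil {
		return nil, err
	}
	return v1Pod, nil
}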

View File

@ -23,6 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
@ -32,27 +33,27 @@ func noDefault(*api.Pod) error { return nil }
func TestDecodeSinglePod(t *testing.T) {
grace := int64(30)
pod := &api.Pod{
pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
APIVersion: "",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "image",
Image: "test/image",
ImagePullPolicy: "IfNotPresent",
TerminationMessagePath: "/dev/termination-log",
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}},
SecurityContext: &api.PodSecurityContext{},
SecurityContext: &v1.PodSecurityContext{},
},
}
json, err := runtime.Encode(testapi.Default.Codec(), pod)
@ -70,7 +71,7 @@ func TestDecodeSinglePod(t *testing.T) {
t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", pod, podOut, string(json))
}
for _, gv := range registered.EnabledVersionsForGroup(api.GroupName) {
for _, gv := range registered.EnabledVersionsForGroup(v1.GroupName) {
info, _ := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), "application/yaml")
encoder := api.Codecs.EncoderForVersion(info.Serializer, gv)
yaml, err := runtime.Encode(encoder, pod)
@ -92,31 +93,31 @@ func TestDecodeSinglePod(t *testing.T) {
func TestDecodePodList(t *testing.T) {
grace := int64(30)
pod := &api.Pod{
pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
APIVersion: "",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "image",
Image: "test/image",
ImagePullPolicy: "IfNotPresent",
TerminationMessagePath: "/dev/termination-log",
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}},
SecurityContext: &api.PodSecurityContext{},
SecurityContext: &v1.PodSecurityContext{},
},
}
podList := &api.PodList{
Items: []api.Pod{*pod},
podList := &v1.PodList{
Items: []v1.Pod{*pod},
}
json, err := runtime.Encode(testapi.Default.Codec(), podList)
if err != nil {
@ -133,7 +134,7 @@ func TestDecodePodList(t *testing.T) {
t.Errorf("expected:\n%#v\ngot:\n%#v\n%s", podList, &podListOut, string(json))
}
for _, gv := range registered.EnabledVersionsForGroup(api.GroupName) {
for _, gv := range registered.EnabledVersionsForGroup(v1.GroupName) {
info, _ := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), "application/yaml")
encoder := api.Codecs.EncoderForVersion(info.Serializer, gv)
yaml, err := runtime.Encode(encoder, podList)

View File

@ -23,6 +23,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -116,7 +117,7 @@ func (c *PodConfig) Sync() {
type podStorage struct {
podLock sync.RWMutex
// map of source name to pod name to pod reference
pods map[string]map[string]*api.Pod
pods map[string]map[string]*v1.Pod
mode PodConfigNotificationMode
// ensures that updates are delivered in strict order
@ -137,7 +138,7 @@ type podStorage struct {
// TODO: allow initialization of the current state of the store with snapshotted version.
func newPodStorage(updates chan<- kubetypes.PodUpdate, mode PodConfigNotificationMode, recorder record.EventRecorder) *podStorage {
return &podStorage{
pods: make(map[string]map[string]*api.Pod),
pods: make(map[string]map[string]*v1.Pod),
mode: mode,
updates: updates,
sourcesSeen: sets.String{},
@ -184,7 +185,7 @@ func (s *podStorage) Merge(source string, change interface{}) error {
case PodConfigNotificationSnapshotAndUpdates:
if len(removes.Pods) > 0 || len(adds.Pods) > 0 || firstSet {
s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source}
s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source}
}
if len(updates.Pods) > 0 {
s.updates <- *updates
@ -195,7 +196,7 @@ func (s *podStorage) Merge(source string, change interface{}) error {
case PodConfigNotificationSnapshot:
if len(updates.Pods) > 0 || len(deletes.Pods) > 0 || len(adds.Pods) > 0 || len(removes.Pods) > 0 || firstSet {
s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: source}
s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: source}
}
case PodConfigNotificationUnknown:
@ -211,21 +212,21 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
s.podLock.Lock()
defer s.podLock.Unlock()
addPods := []*api.Pod{}
updatePods := []*api.Pod{}
deletePods := []*api.Pod{}
removePods := []*api.Pod{}
reconcilePods := []*api.Pod{}
addPods := []*v1.Pod{}
updatePods := []*v1.Pod{}
deletePods := []*v1.Pod{}
removePods := []*v1.Pod{}
reconcilePods := []*v1.Pod{}
pods := s.pods[source]
if pods == nil {
pods = make(map[string]*api.Pod)
pods = make(map[string]*v1.Pod)
}
// updatePodsFunc is the local function which updates the pod cache *oldPods* with new pods *newPods*.
// After the update, the new pods are stored in the pod cache *pods*.
// Notice that *pods* and *oldPods* could be the same cache.
updatePodsFunc := func(newPods []*api.Pod, oldPods, pods map[string]*api.Pod) {
updatePodsFunc := func(newPods []*v1.Pod, oldPods, pods map[string]*v1.Pod) {
filtered := filterInvalidPods(newPods, source, s.recorder)
for _, ref := range filtered {
name := kubecontainer.GetPodFullName(ref)
@ -282,7 +283,7 @@ func (s *podStorage) merge(source string, change interface{}) (adds, updates, de
s.markSourceSet(source)
// Clear the old map entries by just creating a new map
oldPods := pods
pods = make(map[string]*api.Pod)
pods = make(map[string]*v1.Pod)
updatePodsFunc(update.Pods, oldPods, pods)
for name, existing := range oldPods {
if _, found := pods[name]; !found {
@ -319,11 +320,19 @@ func (s *podStorage) seenSources(sources ...string) bool {
return s.sourcesSeen.HasAll(sources...)
}
func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventRecorder) (filtered []*api.Pod) {
func filterInvalidPods(pods []*v1.Pod, source string, recorder record.EventRecorder) (filtered []*v1.Pod) {
names := sets.String{}
for i, pod := range pods {
var errlist field.ErrorList
if errs := validation.ValidatePod(pod); len(errs) != 0 {
// TODO: remove the conversion when validation is performed on versioned objects.
internalPod := &api.Pod{}
if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil {
name := kubecontainer.GetPodFullName(pod)
glog.Warningf("Pod[%d] (%s) from %s failed to convert to v1, ignoring: %v", i+1, name, source, err)
recorder.Eventf(pod, v1.EventTypeWarning, "FailedConversion", "Error converting pod %s from %s, ignoring: %v", name, source, err)
continue
}
if errs := validation.ValidatePod(internalPod); len(errs) != 0 {
errlist = append(errlist, errs...)
// If validation fails, don't trust it any further -
// even Name could be bad.
@ -341,7 +350,7 @@ func filterInvalidPods(pods []*api.Pod, source string, recorder record.EventReco
name := bestPodIdentString(pod)
err := errlist.ToAggregate()
glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
recorder.Eventf(pod, api.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
recorder.Eventf(pod, v1.EventTypeWarning, events.FailedValidation, "Error validating pod %s from %s, ignoring: %v", name, source, err)
continue
}
filtered = append(filtered, pod)
@ -393,14 +402,14 @@ func isAnnotationMapEqual(existingMap, candidateMap map[string]string) bool {
}
// recordFirstSeenTime records the first seen time of this pod.
func recordFirstSeenTime(pod *api.Pod) {
func recordFirstSeenTime(pod *v1.Pod) {
glog.V(4).Infof("Receiving a new pod %q", format.Pod(pod))
pod.Annotations[kubetypes.ConfigFirstSeenAnnotationKey] = kubetypes.NewTimestamp().GetString()
}
// updateAnnotations returns an Annotation map containing the api annotation map plus
// locally managed annotations
func updateAnnotations(existing, ref *api.Pod) {
func updateAnnotations(existing, ref *v1.Pod) {
annotations := make(map[string]string, len(ref.Annotations)+len(localAnnotations))
for k, v := range ref.Annotations {
annotations[k] = v
@ -413,7 +422,7 @@ func updateAnnotations(existing, ref *api.Pod) {
existing.Annotations = annotations
}
func podsDifferSemantically(existing, ref *api.Pod) bool {
func podsDifferSemantically(existing, ref *v1.Pod) bool {
if reflect.DeepEqual(existing.Spec, ref.Spec) &&
reflect.DeepEqual(existing.Labels, ref.Labels) &&
reflect.DeepEqual(existing.DeletionTimestamp, ref.DeletionTimestamp) &&
@ -430,7 +439,7 @@ func podsDifferSemantically(existing, ref *api.Pod) bool {
// * if ref makes no meaningful change, but changes the pod status, returns needReconcile=true
// * else return all false
// Note: at most one of needUpdate, needGracefulDelete and needReconcile should be true at a time
func checkAndUpdatePod(existing, ref *api.Pod) (needUpdate, needReconcile, needGracefulDelete bool) {
func checkAndUpdatePod(existing, ref *v1.Pod) (needUpdate, needReconcile, needGracefulDelete bool) {
// 1. this is a reconcile
// TODO: it would be better to update the whole object and only preserve certain things
@ -474,27 +483,27 @@ func checkAndUpdatePod(existing, ref *api.Pod) (needUpdate, needReconcile, needG
func (s *podStorage) Sync() {
s.updateLock.Lock()
defer s.updateLock.Unlock()
s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*api.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource}
s.updates <- kubetypes.PodUpdate{Pods: s.MergedState().([]*v1.Pod), Op: kubetypes.SET, Source: kubetypes.AllSource}
}
// Object implements config.Accessor
func (s *podStorage) MergedState() interface{} {
s.podLock.RLock()
defer s.podLock.RUnlock()
pods := make([]*api.Pod, 0)
pods := make([]*v1.Pod, 0)
for _, sourcePods := range s.pods {
for _, podRef := range sourcePods {
pod, err := api.Scheme.Copy(podRef)
if err != nil {
glog.Errorf("unable to copy pod: %v", err)
}
pods = append(pods, pod.(*api.Pod))
pods = append(pods, pod.(*v1.Pod))
}
}
return pods
}
func bestPodIdentString(pod *api.Pod) string {
func bestPodIdentString(pod *v1.Pod) string {
namespace := pod.Namespace
if namespace == "" {
namespace = "<empty-namespace>"
@ -506,15 +515,15 @@ func bestPodIdentString(pod *api.Pod) string {
return fmt.Sprintf("%s.%s", name, namespace)
}
func copyPods(sourcePods []*api.Pod) []*api.Pod {
pods := []*api.Pod{}
func copyPods(sourcePods []*v1.Pod) []*v1.Pod {
pods := []*v1.Pod{}
for _, source := range sourcePods {
// Use a deep copy here just in case
pod, err := api.Scheme.Copy(source)
if err != nil {
glog.Errorf("unable to copy pod: %v", err)
}
pods = append(pods, pod.(*api.Pod))
pods = append(pods, pod.(*v1.Pod))
}
return pods
}

View File

@ -24,8 +24,8 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/conversion"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@ -45,7 +45,7 @@ func expectEmptyChannel(t *testing.T, ch <-chan interface{}) {
}
}
type sortedPods []*api.Pod
type sortedPods []*v1.Pod
func (s sortedPods) Len() int {
return len(s)
@ -57,17 +57,17 @@ func (s sortedPods) Less(i, j int) bool {
return s[i].Namespace < s[j].Namespace
}
func CreateValidPod(name, namespace string) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
func CreateValidPod(name, namespace string) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: types.UID(name), // for the purpose of testing, this is unique enough
Name: name,
Namespace: namespace,
},
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
Containers: []api.Container{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
Containers: []v1.Container{
{
Name: "ctr",
Image: "image",
@ -79,13 +79,13 @@ func CreateValidPod(name, namespace string) *api.Pod {
}
}
func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*api.Pod) kubetypes.PodUpdate {
func CreatePodUpdate(op kubetypes.PodOperation, source string, pods ...*v1.Pod) kubetypes.PodUpdate {
return kubetypes.PodUpdate{Pods: pods, Op: op, Source: source}
}
func createPodConfigTester(mode PodConfigNotificationMode) (chan<- interface{}, <-chan kubetypes.PodUpdate, *PodConfig) {
eventBroadcaster := record.NewBroadcaster()
config := NewPodConfig(mode, eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet"}))
config := NewPodConfig(mode, eventBroadcaster.NewRecorder(v1.EventSource{Component: "kubelet"}))
channel := config.Channel(TestSource)
ch := config.Updates()
return channel, ch, config
@ -100,7 +100,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate, expected ...ku
// except for "Pods", which are compared separately below.
expectedCopy, updateCopy := expected[i], update
expectedCopy.Pods, updateCopy.Pods = nil, nil
if !api.Semantic.DeepEqual(expectedCopy, updateCopy) {
if !v1.Semantic.DeepEqual(expectedCopy, updateCopy) {
t.Fatalf("Expected %#v, Got %#v", expectedCopy, updateCopy)
}
@ -186,7 +186,7 @@ func TestInvalidPodFiltered(t *testing.T) {
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new")))
// add an invalid update
podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}})
podUpdate = CreatePodUpdate(kubetypes.UPDATE, TestSource, &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo"}})
channel <- podUpdate
expectNoPodUpdate(t, ch)
}
@ -204,7 +204,7 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
// container updates are separated as UPDATE
pod := *podUpdate.Pods[0]
pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, &pod))
}
@ -222,7 +222,7 @@ func TestNewPodAddedSnapshot(t *testing.T) {
// container updates are separated as UPDATE
pod := *podUpdate.Pods[0]
pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
channel <- CreatePodUpdate(kubetypes.ADD, TestSource, &pod)
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.SET, TestSource, &pod))
}
@ -240,12 +240,12 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) {
// a kubetypes.ADD should be converted to kubetypes.UPDATE
pod := CreateValidPod("foo", "new")
pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
podUpdate = CreatePodUpdate(kubetypes.ADD, TestSource, pod)
channel <- podUpdate
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.UPDATE, TestSource, pod))
podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "new"}})
podUpdate = CreatePodUpdate(kubetypes.REMOVE, TestSource, &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "new"}})
channel <- podUpdate
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.REMOVE, TestSource, pod))
}
@ -282,7 +282,7 @@ func TestNewPodAddedUpdatedSet(t *testing.T) {
// should be converted to a kubetypes.ADD, kubetypes.REMOVE, and kubetypes.UPDATE
pod := CreateValidPod("foo2", "new")
pod.Spec.Containers = []api.Container{{Name: "bar", Image: "test", ImagePullPolicy: api.PullIfNotPresent}}
pod.Spec.Containers = []v1.Container{{Name: "bar", Image: "test", ImagePullPolicy: v1.PullIfNotPresent}}
podUpdate = CreatePodUpdate(kubetypes.SET, TestSource, pod, CreateValidPod("foo3", "new"), CreateValidPod("foo4", "new"))
channel <- podUpdate
expectPodUpdate(t, ch,
@ -294,14 +294,14 @@ func TestNewPodAddedUpdatedSet(t *testing.T) {
func TestNewPodAddedSetReconciled(t *testing.T) {
// Create and touch new test pods, returning the new pod list and the touched pod. We should create a new pod list
// before touching to avoid a data race.
newTestPods := func(touchStatus, touchSpec bool) ([]*api.Pod, *api.Pod) {
pods := []*api.Pod{
newTestPods := func(touchStatus, touchSpec bool) ([]*v1.Pod, *v1.Pod) {
pods := []*v1.Pod{
CreateValidPod("changeable-pod-0", "new"),
CreateValidPod("constant-pod-1", "new"),
CreateValidPod("constant-pod-2", "new"),
}
if touchStatus {
pods[0].Status = api.PodStatus{Message: strconv.Itoa(rand.Int())}
pods[0].Status = v1.PodStatus{Message: strconv.Itoa(rand.Int())}
}
if touchSpec {
pods[0].Spec.Containers[0].Name = strconv.Itoa(rand.Int())
@ -312,7 +312,7 @@ func TestNewPodAddedSetReconciled(t *testing.T) {
kubetypes.ADD,
kubetypes.SET,
} {
var podWithStatusChange *api.Pod
var podWithStatusChange *v1.Pod
pods, _ := newTestPods(false, false)
channel, ch, _ := createPodConfigTester(PodConfigNotificationIncremental)
@ -373,7 +373,7 @@ func TestPodUpdateAnnotations(t *testing.T) {
t.Fatalf("%v", err)
}
podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*api.Pod), CreateValidPod("foo3", "new"))
podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, CreateValidPod("foo1", "new"), clone.(*v1.Pod), CreateValidPod("foo3", "new"))
channel <- podUpdate
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo1", "new"), pod, CreateValidPod("foo3", "new")))
@ -405,7 +405,7 @@ func TestPodUpdateLabels(t *testing.T) {
t.Fatalf("%v", err)
}
podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, clone.(*api.Pod))
podUpdate := CreatePodUpdate(kubetypes.SET, TestSource, clone.(*v1.Pod))
channel <- podUpdate
expectPodUpdate(t, ch, CreatePodUpdate(kubetypes.ADD, TestSource, pod))

View File

@ -28,6 +28,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
@ -50,9 +51,9 @@ func NewSourceFile(path string, nodeName types.NodeName, period time.Duration, u
func new(path string, nodeName types.NodeName, period time.Duration, updates chan<- interface{}) *sourceFile {
send := func(objs []interface{}) {
var pods []*api.Pod
var pods []*v1.Pod
for _, o := range objs {
pods = append(pods, o.(*api.Pod))
pods = append(pods, o.(*v1.Pod))
}
updates <- kubetypes.PodUpdate{Pods: pods, Op: kubetypes.SET, Source: kubetypes.FileSource}
}
@ -84,7 +85,7 @@ func (s *sourceFile) resetStoreFromPath() error {
return err
}
// Emit an update with an empty PodList to allow FileSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
return fmt.Errorf("path does not exist, ignoring")
}
@ -116,13 +117,13 @@ func (s *sourceFile) resetStoreFromPath() error {
// Get as many pod configs as we can from a directory. Return an error if and only if something
// prevented us from reading anything at all. Do not return an error if only some files
// were problematic.
func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) {
func (s *sourceFile) extractFromDir(name string) ([]*v1.Pod, error) {
dirents, err := filepath.Glob(filepath.Join(name, "[^.]*"))
if err != nil {
return nil, fmt.Errorf("glob failed: %v", err)
}
pods := make([]*api.Pod, 0)
pods := make([]*v1.Pod, 0)
if len(dirents) == 0 {
return pods, nil
}
@ -152,7 +153,7 @@ func (s *sourceFile) extractFromDir(name string) ([]*api.Pod, error) {
return pods, nil
}
func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error) {
func (s *sourceFile) extractFromFile(filename string) (pod *v1.Pod, err error) {
glog.V(3).Infof("Reading config file %q", filename)
defer func() {
if err == nil && pod != nil {
@ -192,7 +193,7 @@ func (s *sourceFile) extractFromFile(filename string) (pod *api.Pod, err error)
filename, string(data), podErr)
}
func (s *sourceFile) replaceStore(pods ...*api.Pod) (err error) {
func (s *sourceFile) replaceStore(pods ...*v1.Pod) (err error) {
objs := []interface{}{}
for _, pod := range pods {
objs = append(objs, pod)

View File

@ -26,7 +26,7 @@ import (
"github.com/golang/glog"
"golang.org/x/exp/inotify"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
@ -45,7 +45,7 @@ func (s *sourceFile) watch() error {
return err
}
// Emit an update with an empty PodList to allow FileSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.FileSource}
return fmt.Errorf("path does not exist, ignoring")
}

View File

@ -29,9 +29,10 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/runtime"
@ -57,7 +58,7 @@ func TestUpdateOnNonExistentFile(t *testing.T) {
case got := <-ch:
update := got.(kubetypes.PodUpdate)
expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource)
if !api.Semantic.DeepDerivative(expected, update) {
if !v1.Semantic.DeepDerivative(expected, update) {
t.Fatalf("expected %#v, Got %#v", expected, update)
}
@ -85,11 +86,16 @@ func TestReadPodsFromFileExistAlready(t *testing.T) {
case got := <-ch:
update := got.(kubetypes.PodUpdate)
for _, pod := range update.Pods {
if errs := validation.ValidatePod(pod); len(errs) > 0 {
t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs)
// TODO: remove the conversion when validation is performed on versioned objects.
internalPod := &api.Pod{}
if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil {
t.Fatalf("%s: Cannot convert pod %#v, %#v", testCase.desc, pod, err)
}
if errs := validation.ValidatePod(internalPod); len(errs) > 0 {
t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, internalPod, errs)
}
}
if !api.Semantic.DeepEqual(testCase.expected, update) {
if !v1.Semantic.DeepEqual(testCase.expected, update) {
t.Fatalf("%s: Expected %#v, Got %#v", testCase.desc, testCase.expected, update)
}
case <-time.After(wait.ForeverTestTimeout):
@ -153,7 +159,7 @@ func TestExtractFromEmptyDir(t *testing.T) {
update := (<-ch).(kubetypes.PodUpdate)
expected := CreatePodUpdate(kubetypes.SET, kubetypes.FileSource)
if !api.Semantic.DeepEqual(expected, update) {
if !v1.Semantic.DeepEqual(expected, update) {
t.Fatalf("expected %#v, Got %#v", expected, update)
}
}
@ -169,47 +175,47 @@ func getTestCases(hostname types.NodeName) []*testCase {
return []*testCase{
{
desc: "Simple pod",
pod: &api.Pod{
pod: &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "test",
UID: "12345",
Namespace: "mynamespace",
},
Spec: api.PodSpec{
Containers: []api.Container{{Name: "image", Image: "test/image", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}},
SecurityContext: &api.PodSecurityContext{},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "image", Image: "test/image", SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}},
SecurityContext: &v1.PodSecurityContext{},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &api.Pod{
ObjectMeta: api.ObjectMeta{
expected: CreatePodUpdate(kubetypes.SET, kubetypes.FileSource, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test-" + string(hostname),
UID: "12345",
Namespace: "mynamespace",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "12345"},
SelfLink: getSelfLink("test-"+string(hostname), "mynamespace"),
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: string(hostname),
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "image",
Image: "test/image",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: "Always",
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults()}},
SecurityContext: &api.PodSecurityContext{},
SecurityContext: &v1.PodSecurityContext{},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
}),
},
@ -312,7 +318,7 @@ func watchFileChanged(watchDir bool, t *testing.T) {
lock.Lock()
defer lock.Unlock()
pod := testCase.pod.(*api.Pod)
pod := testCase.pod.(*v1.Pod)
pod.Spec.Containers[0].Name = "image2"
testCase.expected.Pods[0].Spec.Containers[0].Name = "image2"
@ -355,12 +361,17 @@ func expectUpdate(t *testing.T, ch chan interface{}, testCase *testCase) {
case got := <-ch:
update := got.(kubetypes.PodUpdate)
for _, pod := range update.Pods {
if errs := validation.ValidatePod(pod); len(errs) > 0 {
t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, pod, errs)
// TODO: remove the conversion when validation is performed on versioned objects.
internalPod := &api.Pod{}
if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil {
t.Fatalf("%s: Cannot convert pod %#v, %#v", testCase.desc, pod, err)
}
if errs := validation.ValidatePod(internalPod); len(errs) > 0 {
t.Fatalf("%s: Invalid pod %#v, %#v", testCase.desc, internalPod, errs)
}
}
if !api.Semantic.DeepEqual(testCase.expected, update) {
if !v1.Semantic.DeepEqual(testCase.expected, update) {
t.Fatalf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
}
return
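
The tests above repeat the same convert-then-validate dance in two places; a compact helper capturing it, sketched only from the calls visible in this diff:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/validation"
)

// validateVersionedPod converts a *v1.Pod to the internal type and runs the
// internal validators, mirroring the TODO-marked blocks in the tests above.
func validateVersionedPod(pod *v1.Pod) error {
	internalPod := &api.Pod{}
	if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil {
		return fmt.Errorf("cannot convert pod %q: %v", pod.Name, err)
	}
	if errs := validation.ValidatePod(internalPod); len(errs) > 0 {
		return fmt.Errorf("invalid pod %q: %v", pod.Name, errs.ToAggregate())
	}
	return nil
}

func main() {
	// An empty v1.Pod fails internal validation, so this prints an error.
	fmt.Println(validateVersionedPod(&v1.Pod{}))
}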

View File

@ -25,6 +25,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/wait"
@ -101,7 +102,7 @@ func (s *sourceURL) extractFromURL() error {
}
if len(data) == 0 {
// Emit an update with an empty PodList to allow HTTPSource to be marked as seen
s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
return fmt.Errorf("zero-length data received from %v", s.url)
}
// Short circuit if the data has not changed since the last time it was read.
@ -117,7 +118,7 @@ func (s *sourceURL) extractFromURL() error {
// It parsed but could not be used.
return singlePodErr
}
s.updates <- kubetypes.PodUpdate{Pods: []*api.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
s.updates <- kubetypes.PodUpdate{Pods: []*v1.Pod{pod}, Op: kubetypes.SET, Source: kubetypes.HTTPSource}
return nil
}
@ -128,7 +129,7 @@ func (s *sourceURL) extractFromURL() error {
// It parsed but could not be used.
return multiPodErr
}
pods := make([]*api.Pod, 0)
pods := make([]*v1.Pod, 0)
for i := range podList.Items {
pods = append(pods, &podList.Items[i])
}
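
Note that the loop above appends &podList.Items[i] rather than the address of a range variable; with the Go versions of this era the range variable is reused, so the distinction matters. A self-contained illustration (no Kubernetes types involved):

package main

import "fmt"

type pod struct{ name string }

func main() {
	items := []pod{{"foo"}, {"bar"}}

	// Taking the address of the loop variable aliases one reused variable,
	// so every pointer ends up at the last element.
	var wrong []*pod
	for _, item := range items {
		wrong = append(wrong, &item)
	}

	// Indexing into the slice, as extractFromURL does, yields a distinct
	// pointer per element.
	var right []*pod
	for i := range items {
		right = append(right, &items[i])
	}

	fmt.Println(wrong[0].name, wrong[1].name) // bar bar
	fmt.Println(right[0].name, right[1].name) // foo bar
}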

View File

@ -23,9 +23,10 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apimachinery/registered"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@ -56,49 +57,49 @@ func TestExtractFromHttpBadness(t *testing.T) {
func TestExtractInvalidPods(t *testing.T) {
var testCases = []struct {
desc string
pod *api.Pod
pod *v1.Pod
}{
{
desc: "No version",
pod: &api.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: ""}},
pod: &v1.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: ""}},
},
{
desc: "Invalid version",
pod: &api.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: "v1betta2"}},
pod: &v1.Pod{TypeMeta: unversioned.TypeMeta{APIVersion: "v1betta2"}},
},
{
desc: "Invalid volume name",
pod: &api.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "_INVALID_"}},
pod: &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "_INVALID_"}},
},
},
},
{
desc: "Duplicate volume names",
pod: &api.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "repeated"}, {Name: "repeated"}},
pod: &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "repeated"}, {Name: "repeated"}},
},
},
},
{
desc: "Unspecified container name",
pod: &api.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
Spec: api.PodSpec{
Containers: []api.Container{{Name: ""}},
pod: &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: ""}},
},
},
},
{
desc: "Invalid container name",
pod: &api.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
Spec: api.PodSpec{
Containers: []api.Container{{Name: "_INVALID_"}},
pod: &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "_INVALID_"}},
},
},
},
@ -133,144 +134,144 @@ func TestExtractPodsFromHTTP(t *testing.T) {
}{
{
desc: "Single pod",
pods: &api.Pod{
pods: &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
UID: "111",
Namespace: "mynamespace",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: string(nodeName),
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
SecurityContext: &api.PodSecurityContext{},
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}},
SecurityContext: &v1.PodSecurityContext{},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
expected: CreatePodUpdate(kubetypes.SET,
kubetypes.HTTPSource,
&api.Pod{
ObjectMeta: api.ObjectMeta{
&v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "111",
Name: "foo" + "-" + nodeName,
Namespace: "mynamespace",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
SelfLink: getSelfLink("foo-"+nodeName, "mynamespace"),
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
SecurityContext: &api.PodSecurityContext{},
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
SecurityContext: &v1.PodSecurityContext{},
TerminationGracePeriodSeconds: &grace,
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "1",
Image: "foo",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: "Always",
}},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
}),
},
{
desc: "Multiple pods",
pods: &api.PodList{
pods: &v1.PodList{
TypeMeta: unversioned.TypeMeta{
Kind: "PodList",
APIVersion: "",
},
Items: []api.Pod{
Items: []v1.Pod{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
UID: "111",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
SecurityContext: &api.PodSecurityContext{},
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}},
SecurityContext: &v1.PodSecurityContext{},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "bar",
UID: "222",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []api.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}},
SecurityContext: &api.PodSecurityContext{},
Containers: []v1.Container{{Name: "2", Image: "bar:bartag", ImagePullPolicy: ""}},
SecurityContext: &v1.PodSecurityContext{},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
},
},
expected: CreatePodUpdate(kubetypes.SET,
kubetypes.HTTPSource,
&api.Pod{
ObjectMeta: api.ObjectMeta{
&v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "111",
Name: "foo" + "-" + nodeName,
Namespace: "default",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "111"},
SelfLink: getSelfLink("foo-"+nodeName, kubetypes.NamespaceDefault),
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
SecurityContext: &api.PodSecurityContext{},
SecurityContext: &v1.PodSecurityContext{},
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "1",
Image: "foo",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: "Always",
}},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
},
&api.Pod{
ObjectMeta: api.ObjectMeta{
&v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "222",
Name: "bar" + "-" + nodeName,
Namespace: "default",
Annotations: map[string]string{kubetypes.ConfigHashAnnotationKey: "222"},
SelfLink: getSelfLink("bar-"+nodeName, kubetypes.NamespaceDefault),
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSClusterFirst,
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &grace,
SecurityContext: &api.PodSecurityContext{},
SecurityContext: &v1.PodSecurityContext{},
Containers: []api.Container{{
Containers: []v1.Container{{
Name: "2",
Image: "bar:bartag",
TerminationMessagePath: "/dev/termination-log",
ImagePullPolicy: "IfNotPresent",
}},
},
Status: api.PodStatus{
Phase: api.PodPending,
Status: v1.PodStatus{
Phase: v1.PodPending,
},
}),
},
@ -300,11 +301,16 @@ func TestExtractPodsFromHTTP(t *testing.T) {
}
update := (<-ch).(kubetypes.PodUpdate)
if !api.Semantic.DeepEqual(testCase.expected, update) {
if !v1.Semantic.DeepEqual(testCase.expected, update) {
t.Errorf("%s: Expected: %#v, Got: %#v", testCase.desc, testCase.expected, update)
}
for _, pod := range update.Pods {
if errs := validation.ValidatePod(pod); len(errs) != 0 {
// TODO: remove the conversion when validation is performed on versioned objects.
internalPod := &api.Pod{}
if err := v1.Convert_v1_Pod_To_api_Pod(pod, internalPod, nil); err != nil {
t.Fatalf("%s: Cannot convert pod %#v, %#v", testCase.desc, pod, err)
}
if errs := validation.ValidatePod(internalPod); len(errs) != 0 {
t.Errorf("%s: Expected no validation errors on %#v, Got %v", testCase.desc, pod, errs.ToAggregate())
}
}
@ -312,19 +318,19 @@ func TestExtractPodsFromHTTP(t *testing.T) {
}
func TestURLWithHeader(t *testing.T) {
pod := &api.Pod{
pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
Kind: "Pod",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
UID: "111",
Namespace: "mynamespace",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: "localhost",
Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}},
Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}},
},
}
data, err := json.Marshal(pod)
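
For reference, the marshalling step above in standalone form: the HTTP source consumes plain JSON of a versioned pod, so encoding/json over a *v1.Pod is all the test needs (field values here are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	pod := &v1.Pod{
		TypeMeta:   unversioned.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "mynamespace", UID: "111"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1.PullAlways}},
		},
	}

	data, err := json.Marshal(pod)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // the payload a manifest URL would serve
}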

View File

@ -19,7 +19,7 @@ package container
import (
"sync"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
// RefManager manages the references for the containers.
@ -28,17 +28,17 @@ import (
// for the caller.
type RefManager struct {
sync.RWMutex
containerIDToRef map[ContainerID]*api.ObjectReference
containerIDToRef map[ContainerID]*v1.ObjectReference
}
// NewRefManager creates and returns a container reference manager
// with empty contents.
func NewRefManager() *RefManager {
return &RefManager{containerIDToRef: make(map[ContainerID]*api.ObjectReference)}
return &RefManager{containerIDToRef: make(map[ContainerID]*v1.ObjectReference)}
}
// SetRef stores a reference to a pod's container, associating it with the given container ID.
func (c *RefManager) SetRef(id ContainerID, ref *api.ObjectReference) {
func (c *RefManager) SetRef(id ContainerID, ref *v1.ObjectReference) {
c.Lock()
defer c.Unlock()
c.containerIDToRef[id] = ref
@ -52,7 +52,7 @@ func (c *RefManager) ClearRef(id ContainerID) {
}
// GetRef returns the container reference of the given ID, or (nil, false) if none is stored.
func (c *RefManager) GetRef(id ContainerID) (ref *api.ObjectReference, ok bool) {
func (c *RefManager) GetRef(id ContainerID) (ref *v1.ObjectReference, ok bool) {
c.RLock()
defer c.RUnlock()
ref, ok = c.containerIDToRef[id]
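
A short usage sketch for the manager above; ContainerID is assumed here to carry a runtime type and an ID string, and the ObjectReference fields are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	rm := kubecontainer.NewRefManager()
	id := kubecontainer.ContainerID{Type: "docker", ID: "abc123"} // assumed field names

	rm.SetRef(id, &v1.ObjectReference{Kind: "Pod", Name: "nginx", FieldPath: "spec.containers{web}"})

	if ref, ok := rm.GetRef(id); ok {
		fmt.Println(ref.Name, ref.FieldPath) // nginx spec.containers{web}
	}

	rm.ClearRef(id)
	if _, ok := rm.GetRef(id); !ok {
		fmt.Println("reference cleared")
	}
}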

View File

@ -25,8 +25,8 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/util/format"
@ -39,25 +39,25 @@ import (
// HandlerRunner runs a lifecycle handler for a container.
type HandlerRunner interface {
Run(containerID ContainerID, pod *api.Pod, container *api.Container, handler *api.Handler) (string, error)
Run(containerID ContainerID, pod *v1.Pod, container *v1.Container, handler *v1.Handler) (string, error)
}
// RuntimeHelper wraps kubelet to make container runtime
// able to get necessary information such as the RunContainerOptions and DNS settings.
type RuntimeHelper interface {
GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*RunContainerOptions, error)
GetClusterDNS(pod *api.Pod) (dnsServers []string, dnsSearches []string, err error)
GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*RunContainerOptions, error)
GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, err error)
GetPodDir(podUID types.UID) string
GeneratePodHostNameAndDomain(pod *api.Pod) (hostname string, hostDomain string, err error)
GeneratePodHostNameAndDomain(pod *v1.Pod) (hostname string, hostDomain string, err error)
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64
GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64
}
// ShouldContainerBeRestarted checks whether a container needs to be restarted.
// TODO(yifan): Think about how to refactor this.
func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatus *PodStatus) bool {
func ShouldContainerBeRestarted(container *v1.Container, pod *v1.Pod, podStatus *PodStatus) bool {
// Get latest container status.
status := podStatus.FindContainerStatusByName(container.Name)
// If the container was never started before, we should start it.
@ -74,11 +74,11 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu
return true
}
// Check RestartPolicy for dead container
if pod.Spec.RestartPolicy == api.RestartPolicyNever {
if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
glog.V(4).Infof("Already ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
return false
}
if pod.Spec.RestartPolicy == api.RestartPolicyOnFailure {
if pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
// Check the exit code.
if status.ExitCode == 0 {
glog.V(4).Infof("Already successfully ran container %q of pod %q, do nothing", container.Name, format.Pod(pod))
@ -90,7 +90,7 @@ func ShouldContainerBeRestarted(container *api.Container, pod *api.Pod, podStatu
// HashContainer returns the hash of the container. It is used to compare
// the running container with its desired spec.
func HashContainer(container *api.Container) uint64 {
func HashContainer(container *v1.Container) uint64 {
hash := adler32.New()
hashutil.DeepHashObject(hash, *container)
return uint64(hash.Sum32())
@ -107,7 +107,7 @@ func EnvVarsToMap(envs []EnvVar) map[string]string {
return result
}
func ExpandContainerCommandAndArgs(container *api.Container, envs []EnvVar) (command []string, args []string) {
func ExpandContainerCommandAndArgs(container *v1.Container, envs []EnvVar) (command []string, args []string) {
mapping := expansion.MappingFuncFor(EnvVarsToMap(envs))
if len(container.Command) != 0 {
@ -136,11 +136,11 @@ type innerEventRecorder struct {
recorder record.EventRecorder
}
func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*api.ObjectReference, bool) {
func (irecorder *innerEventRecorder) shouldRecordEvent(object runtime.Object) (*v1.ObjectReference, bool) {
if object == nil {
return nil, false
}
if ref, ok := object.(*api.ObjectReference); ok {
if ref, ok := object.(*v1.ObjectReference); ok {
if !strings.HasPrefix(ref.FieldPath, ImplicitContainerPrefix) {
return ref, true
}
@ -168,8 +168,8 @@ func (irecorder *innerEventRecorder) PastEventf(object runtime.Object, timestamp
}
// Pod must not be nil.
func IsHostNetworkPod(pod *api.Pod) bool {
return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork
func IsHostNetworkPod(pod *v1.Pod) bool {
return pod.Spec.HostNetwork
}
// TODO(random-liu): Convert PodStatus to running Pod, should be deprecated soon
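
The IsHostNetworkPod change above is more than a rename: in the versioned API, HostNetwork sits directly on PodSpec instead of under the pod's SecurityContext. A minimal sketch of the new check:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "host-net-pod"},
		Spec: v1.PodSpec{
			HostNetwork: true, // v1 field; the internal API kept this under SecurityContext
			Containers:  []v1.Container{{Name: "c", Image: "busybox"}},
		},
	}
	fmt.Println(kubecontainer.IsHostNetworkPod(pod)) // true
}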

View File

@ -20,7 +20,7 @@ import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
func TestEnvVarsToMap(t *testing.T) {
@ -53,18 +53,18 @@ func TestEnvVarsToMap(t *testing.T) {
func TestExpandCommandAndArgs(t *testing.T) {
cases := []struct {
name string
container *api.Container
container *v1.Container
envs []EnvVar
expectedCommand []string
expectedArgs []string
}{
{
name: "none",
container: &api.Container{},
container: &v1.Container{},
},
{
name: "command expanded",
container: &api.Container{
container: &v1.Container{
Command: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"},
},
envs: []EnvVar{
@ -81,7 +81,7 @@ func TestExpandCommandAndArgs(t *testing.T) {
},
{
name: "args expanded",
container: &api.Container{
container: &v1.Container{
Args: []string{"zap", "$(VAR_TEST)", "$(VAR_TEST2)"},
},
envs: []EnvVar{
@ -98,7 +98,7 @@ func TestExpandCommandAndArgs(t *testing.T) {
},
{
name: "both expanded",
container: &api.Container{
container: &v1.Container{
Command: []string{"$(VAR_TEST2)--$(VAR_TEST)", "foo", "$(VAR_TEST3)"},
Args: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"},
},
@ -136,14 +136,14 @@ func TestExpandCommandAndArgs(t *testing.T) {
}
func TestShouldContainerBeRestarted(t *testing.T) {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "no-history"},
{Name: "alive"},
{Name: "succeed"},
@ -187,10 +187,10 @@ func TestShouldContainerBeRestarted(t *testing.T) {
},
},
}
policies := []api.RestartPolicy{
api.RestartPolicyNever,
api.RestartPolicyOnFailure,
api.RestartPolicyAlways,
policies := []v1.RestartPolicy{
v1.RestartPolicyNever,
v1.RestartPolicyOnFailure,
v1.RestartPolicyAlways,
}
expected := map[string][]bool{
"no-history": {true, true, true},

View File

@ -19,25 +19,25 @@ package container
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
var ImplicitContainerPrefix string = "implicitly required container "
// GenerateContainerRef returns an *api.ObjectReference which references the given container
// GenerateContainerRef returns a *v1.ObjectReference which references the given container
// within the given pod. Returns an error if the reference can't be constructed or the
// container doesn't actually belong to the pod.
//
// This function will return an error if the provided Pod does not have a selfLink,
// but we expect selfLink to be populated at all call sites for the function.
func GenerateContainerRef(pod *api.Pod, container *api.Container) (*api.ObjectReference, error) {
func GenerateContainerRef(pod *v1.Pod, container *v1.Container) (*v1.ObjectReference, error) {
fieldPath, err := fieldPath(pod, container)
if err != nil {
// TODO: figure out intelligent way to refer to containers that we implicitly
// start (like the pod infra container). This is not a good way, ugh.
fieldPath = ImplicitContainerPrefix + container.Name
}
ref, err := api.GetPartialReference(pod, fieldPath)
ref, err := v1.GetPartialReference(pod, fieldPath)
if err != nil {
return nil, err
}
@ -46,7 +46,7 @@ func GenerateContainerRef(pod *api.Pod, container *api.Container) (*api.ObjectRe
// fieldPath returns a fieldPath locating container within pod.
// Returns an error if the container isn't part of the pod.
func fieldPath(pod *api.Pod, container *api.Container) (string, error) {
func fieldPath(pod *v1.Pod, container *v1.Container) (string, error) {
for i := range pod.Spec.Containers {
here := &pod.Spec.Containers[i]
if here.Name == container.Name {
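
A standalone re-implementation sketch of the lookup above (not the real helper): a container found by name yields "spec.containers{name}", while an unnamed match falls back to its index, which is the behaviour the test file that follows expects:

package main

import "fmt"

type container struct{ Name string }

func fieldPathFor(containers []container, target container) (string, bool) {
	for i, c := range containers {
		if c.Name == target.Name {
			if c.Name == "" {
				return fmt.Sprintf("spec.containers[%d]", i), true
			}
			return fmt.Sprintf("spec.containers{%s}", c.Name), true
		}
	}
	return "", false
}

func main() {
	cs := []container{{"foo"}, {"bar"}, {""}, {"baz"}}
	fmt.Println(fieldPathFor(cs, container{"foo"})) // spec.containers{foo} true
	fmt.Println(fieldPathFor(cs, container{""}))    // spec.containers[2] true
	fmt.Println(fieldPathFor(cs, container{"qux"})) // (empty) false
}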

View File

@ -19,29 +19,29 @@ package container
import (
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
)
func TestFieldPath(t *testing.T) {
pod := &api.Pod{Spec: api.PodSpec{Containers: []api.Container{
pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
{Name: "foo"},
{Name: "bar"},
{Name: ""},
{Name: "baz"},
}}}
table := map[string]struct {
pod *api.Pod
container *api.Container
pod *v1.Pod
container *v1.Container
path string
success bool
}{
"basic": {pod, &api.Container{Name: "foo"}, "spec.containers{foo}", true},
"basic2": {pod, &api.Container{Name: "baz"}, "spec.containers{baz}", true},
"emptyName": {pod, &api.Container{Name: ""}, "spec.containers[2]", true},
"basic": {pod, &v1.Container{Name: "foo"}, "spec.containers{foo}", true},
"basic2": {pod, &v1.Container{Name: "baz"}, "spec.containers{baz}", true},
"emptyName": {pod, &v1.Container{Name: ""}, "spec.containers[2]", true},
"basicSamePointer": {pod, &pod.Spec.Containers[0], "spec.containers{foo}", true},
"missing": {pod, &api.Container{Name: "qux"}, "", false},
"missing": {pod, &v1.Container{Name: "qux"}, "", false},
}
for name, item := range table {
@ -64,20 +64,20 @@ func TestFieldPath(t *testing.T) {
func TestGenerateContainerRef(t *testing.T) {
var (
okPod = api.Pod{
okPod = v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "ok",
Namespace: "test-ns",
UID: "bar",
ResourceVersion: "42",
SelfLink: "/api/" + registered.GroupOrDie(api.GroupName).GroupVersion.String() + "/pods/foo",
SelfLink: "/api/" + registered.GroupOrDie(v1.GroupName).GroupVersion.String() + "/pods/foo",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "by-name",
},
@ -91,24 +91,24 @@ func TestGenerateContainerRef(t *testing.T) {
noSelfLinkPod.Kind = ""
noSelfLinkPod.APIVersion = ""
noSelfLinkPod.ObjectMeta.SelfLink = ""
defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + registered.GroupOrDie(api.GroupName).GroupVersion.String() + "/pods/ok"
defaultedSelfLinkPod.ObjectMeta.SelfLink = "/api/" + registered.GroupOrDie(v1.GroupName).GroupVersion.String() + "/pods/ok"
cases := []struct {
name string
pod *api.Pod
container *api.Container
expected *api.ObjectReference
pod *v1.Pod
container *v1.Container
expected *v1.ObjectReference
success bool
}{
{
name: "by-name",
pod: &okPod,
container: &api.Container{
container: &v1.Container{
Name: "by-name",
},
expected: &api.ObjectReference{
expected: &v1.ObjectReference{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
Name: "ok",
Namespace: "test-ns",
UID: "bar",
@ -120,10 +120,10 @@ func TestGenerateContainerRef(t *testing.T) {
{
name: "no-name",
pod: &okPod,
container: &api.Container{},
expected: &api.ObjectReference{
container: &v1.Container{},
expected: &v1.ObjectReference{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
Name: "ok",
Namespace: "test-ns",
UID: "bar",
@ -135,19 +135,19 @@ func TestGenerateContainerRef(t *testing.T) {
{
name: "no-selflink",
pod: &noSelfLinkPod,
container: &api.Container{},
container: &v1.Container{},
expected: nil,
success: false,
},
{
name: "defaulted-selflink",
pod: &defaultedSelfLinkPod,
container: &api.Container{
container: &v1.Container{
Name: "by-name",
},
expected: &api.ObjectReference{
expected: &v1.ObjectReference{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
Name: "ok",
Namespace: "test-ns",
UID: "bar",
@ -159,12 +159,12 @@ func TestGenerateContainerRef(t *testing.T) {
{
name: "implicitly-required",
pod: &okPod,
container: &api.Container{
container: &v1.Container{
Name: "net",
},
expected: &api.ObjectReference{
expected: &v1.ObjectReference{
Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
Name: "ok",
Namespace: "test-ns",
UID: "bar",

View File

@ -25,7 +25,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
@ -85,13 +85,13 @@ type Runtime interface {
// TODO: Revisit this method and make it cleaner.
GarbageCollect(gcPolicy ContainerGCPolicy, allSourcesReady bool) error
// Syncs the running pod into the desired pod.
SyncPod(pod *api.Pod, apiPodStatus api.PodStatus, podStatus *PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult
SyncPod(pod *v1.Pod, apiPodStatus v1.PodStatus, podStatus *PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// TODO(random-liu): Return PodSyncResult in KillPod.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error
KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in the Runtime.
GetPodStatus(uid types.UID, name, namespace string) (*PodStatus, error)
@ -111,7 +111,7 @@ type Runtime interface {
// default, it returns a snapshot of the container log. Set 'follow' to true to
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
// "100" or "all") to tail the log.
GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error)
GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error)
// Delete a container. If the container is still running, an error is returned.
DeleteContainer(containerID ContainerID) error
// ImageService provides image-related methods.
@ -147,7 +147,7 @@ type IndirectStreamingRuntime interface {
type ImageService interface {
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
PullImage(image ImageSpec, pullSecrets []api.Secret) error
PullImage(image ImageSpec, pullSecrets []v1.Secret) error
// IsImagePresent checks whether the container image is already in the local storage.
IsImagePresent(image ImageSpec) (bool, error)
// Gets all images currently on the machine.
@ -188,8 +188,8 @@ type Pod struct {
// PodPair contains both runtime#Pod and api#Pod
type PodPair struct {
// APIPod is the api.Pod
APIPod *api.Pod
// APIPod is the v1.Pod
APIPod *v1.Pod
// RunningPod is the pod defined in pkg/kubelet/container/runtime#Pod
RunningPod *Pod
}
@ -270,7 +270,7 @@ type Container struct {
// a container.
ID ContainerID
// The name of the container, which should be the same as specified by
// api.Container.
// v1.Container.
Name string
// The image name of the container, this also includes the tag of the image,
// the expected form is "NAME:TAG".
@ -285,7 +285,7 @@ type Container struct {
}
// PodStatus represents the status of the pod and its containers.
// api.PodStatus can be derived from examining PodStatus and api.Pod.
// v1.PodStatus can be derived from examining PodStatus and v1.Pod.
type PodStatus struct {
// ID of the pod.
ID types.UID
@ -392,7 +392,7 @@ type PortMapping struct {
// Name of the port mapping
Name string
// Protocol of the port mapping.
Protocol api.Protocol
Protocol v1.Protocol
// The port number within the container.
ContainerPort int
// The port number on the host.
@ -570,16 +570,16 @@ func (p *Pod) FindSandboxByID(id ContainerID) *Container {
return nil
}
// ToAPIPod converts Pod to api.Pod. Note that if a field in api.Pod has no
// ToAPIPod converts Pod to v1.Pod. Note that if a field in v1.Pod has no
// corresponding field in Pod, the field would not be populated.
func (p *Pod) ToAPIPod() *api.Pod {
var pod api.Pod
func (p *Pod) ToAPIPod() *v1.Pod {
var pod v1.Pod
pod.UID = p.ID
pod.Name = p.Name
pod.Namespace = p.Namespace
for _, c := range p.Containers {
var container api.Container
var container v1.Container
container.Name = c.Name
container.Image = c.Image
pod.Spec.Containers = append(pod.Spec.Containers, container)
@ -593,7 +593,7 @@ func (p *Pod) IsEmpty() bool {
}
// GetPodFullName returns a name that uniquely identifies a pod.
func GetPodFullName(pod *api.Pod) string {
func GetPodFullName(pod *v1.Pod) string {
// Use underscore as the delimiter because it is not allowed in pod name
// (DNS subdomain format), while allowed in the container name format.
return pod.Name + "_" + pod.Namespace
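
A brief usage sketch for the two helpers above, using only fields and functions shown in this diff (import paths as in the hunks):

package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/types"
)

func main() {
	running := &kubecontainer.Pod{
		ID:        types.UID("1234"),
		Name:      "nginx",
		Namespace: "default",
		Containers: []*kubecontainer.Container{
			{Name: "web", Image: "nginx:1.11"},
		},
	}

	apiPod := running.ToAPIPod()                      // *v1.Pod with only the carried fields populated
	fmt.Println(kubecontainer.GetPodFullName(apiPod)) // nginx_default
	fmt.Println(len(apiPod.Spec.Containers))          // 1
}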

View File

@ -24,7 +24,7 @@ import (
"sync"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
. "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
@ -44,7 +44,7 @@ type FakeRuntime struct {
PodList []*FakePod
AllPodList []*FakePod
ImageList []Image
APIPodStatus api.PodStatus
APIPodStatus v1.PodStatus
PodStatus PodStatus
StartedPods []string
KilledPods []string
@ -133,7 +133,7 @@ func (f *FakeRuntime) ClearCalls() {
f.CalledFunctions = []string{}
f.PodList = []*FakePod{}
f.AllPodList = []*FakePod{}
f.APIPodStatus = api.PodStatus{}
f.APIPodStatus = v1.PodStatus{}
f.StartedPods = []string{}
f.KilledPods = []string{}
f.StartedContainers = []string{}
@ -236,7 +236,7 @@ func (f *FakeRuntime) GetPods(all bool) ([]*Pod, error) {
return pods, f.Err
}
func (f *FakeRuntime) SyncPod(pod *api.Pod, _ api.PodStatus, _ *PodStatus, _ []api.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) {
func (f *FakeRuntime) SyncPod(pod *v1.Pod, _ v1.PodStatus, _ *PodStatus, _ []v1.Secret, backOff *flowcontrol.Backoff) (result PodSyncResult) {
f.Lock()
defer f.Unlock()
@ -252,7 +252,7 @@ func (f *FakeRuntime) SyncPod(pod *api.Pod, _ api.PodStatus, _ *PodStatus, _ []a
return
}
func (f *FakeRuntime) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error {
func (f *FakeRuntime) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
f.Lock()
defer f.Unlock()
@ -264,7 +264,7 @@ func (f *FakeRuntime) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride
return f.Err
}
func (f *FakeRuntime) RunContainerInPod(container api.Container, pod *api.Pod, volumeMap map[string]volume.VolumePlugin) error {
func (f *FakeRuntime) RunContainerInPod(container v1.Container, pod *v1.Pod, volumeMap map[string]volume.VolumePlugin) error {
f.Lock()
defer f.Unlock()
@ -281,14 +281,14 @@ func (f *FakeRuntime) RunContainerInPod(container api.Container, pod *api.Pod, v
return f.Err
}
func (f *FakeRuntime) KillContainerInPod(container api.Container, pod *api.Pod) error {
func (f *FakeRuntime) KillContainerInPod(container v1.Container, pod *v1.Pod) error {
f.Lock()
defer f.Unlock()
f.CalledFunctions = append(f.CalledFunctions, "KillContainerInPod")
f.KilledContainers = append(f.KilledContainers, container.Name)
var containers []api.Container
var containers []v1.Container
for _, c := range pod.Spec.Containers {
if c.Name == container.Name {
continue
@ -336,7 +336,7 @@ func (f *FakeDirectStreamingRuntime) AttachContainer(containerID ContainerID, st
return f.Err
}
func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (f *FakeRuntime) GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
f.Lock()
defer f.Unlock()
@ -344,7 +344,7 @@ func (f *FakeRuntime) GetContainerLogs(pod *api.Pod, containerID ContainerID, lo
return f.Err
}
func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []api.Secret) error {
func (f *FakeRuntime) PullImage(image ImageSpec, pullSecrets []v1.Secret) error {
f.Lock()
defer f.Unlock()

View File

@ -21,7 +21,7 @@ import (
"time"
"github.com/stretchr/testify/mock"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
. "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/flowcontrol"
@ -65,22 +65,22 @@ func (r *Mock) GetPods(all bool) ([]*Pod, error) {
return args.Get(0).([]*Pod), args.Error(1)
}
func (r *Mock) SyncPod(pod *api.Pod, apiStatus api.PodStatus, status *PodStatus, secrets []api.Secret, backOff *flowcontrol.Backoff) PodSyncResult {
func (r *Mock) SyncPod(pod *v1.Pod, apiStatus v1.PodStatus, status *PodStatus, secrets []v1.Secret, backOff *flowcontrol.Backoff) PodSyncResult {
args := r.Called(pod, apiStatus, status, secrets, backOff)
return args.Get(0).(PodSyncResult)
}
func (r *Mock) KillPod(pod *api.Pod, runningPod Pod, gracePeriodOverride *int64) error {
func (r *Mock) KillPod(pod *v1.Pod, runningPod Pod, gracePeriodOverride *int64) error {
args := r.Called(pod, runningPod, gracePeriodOverride)
return args.Error(0)
}
func (r *Mock) RunContainerInPod(container api.Container, pod *api.Pod, volumeMap map[string]volume.VolumePlugin) error {
func (r *Mock) RunContainerInPod(container v1.Container, pod *v1.Pod, volumeMap map[string]volume.VolumePlugin) error {
args := r.Called(pod, pod, volumeMap)
return args.Error(0)
}
func (r *Mock) KillContainerInPod(container api.Container, pod *api.Pod) error {
func (r *Mock) KillContainerInPod(container v1.Container, pod *v1.Pod) error {
args := r.Called(pod, pod)
return args.Error(0)
}
@ -100,12 +100,12 @@ func (r *Mock) AttachContainer(containerID ContainerID, stdin io.Reader, stdout,
return args.Error(0)
}
func (r *Mock) GetContainerLogs(pod *api.Pod, containerID ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (r *Mock) GetContainerLogs(pod *v1.Pod, containerID ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
args := r.Called(pod, containerID, logOptions, stdout, stderr)
return args.Error(0)
}
func (r *Mock) PullImage(image ImageSpec, pullSecrets []api.Secret) error {
func (r *Mock) PullImage(image ImageSpec, pullSecrets []v1.Secret) error {
args := r.Called(image, pullSecrets)
return args.Error(0)
}
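
Each method above simply hands the call to testify's mock.Mock via r.Called and unpacks the canned return values. A self-contained sketch of that pattern with a hypothetical one-method interface (not the kubelet's Runtime):

package main

import (
	"errors"
	"fmt"

	"github.com/stretchr/testify/mock"
)

// Killer is a stand-in interface used only for this illustration.
type Killer interface {
	KillPod(name string) error
}

type MockKiller struct {
	mock.Mock
}

func (m *MockKiller) KillPod(name string) error {
	args := m.Called(name) // record the call, look up the configured return values
	return args.Error(0)
}

func main() {
	m := &MockKiller{}
	m.On("KillPod", "nginx").Return(nil)
	m.On("KillPod", mock.Anything).Return(errors.New("unexpected pod"))

	fmt.Println(m.KillPod("nginx")) // <nil>
	fmt.Println(m.KillPod("other")) // unexpected pod
}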

View File

@ -20,7 +20,7 @@ package custommetrics
import (
"path"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
const (
@ -31,7 +31,7 @@ const (
// Alpha implementation.
// Returns a path to a cAdvisor-specific custom metrics configuration.
func GetCAdvisorCustomMetricsDefinitionPath(container *api.Container) (*string, error) {
func GetCAdvisorCustomMetricsDefinitionPath(container *v1.Container) (*string, error) {
// Assumes that the container has Custom Metrics enabled if it has the "/etc/custom-metrics" directory
// mounted as a volume. Custom Metrics definition is expected to be in "definition.json".
if container.VolumeMounts != nil {

View File

@ -20,18 +20,18 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
func TestGetCAdvisorCustomMetricsDefinitionPath(t *testing.T) {
regularContainer := &api.Container{
regularContainer := &v1.Container{
Name: "test_container",
}
cmContainer := &api.Container{
cmContainer := &v1.Container{
Name: "test_container",
VolumeMounts: []api.VolumeMount{
VolumeMounts: []v1.VolumeMount{
{
Name: "cm",
MountPath: CustomMetricsDefinitionDir,
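
A self-contained sketch of the detection rule the test exercises, re-implemented for illustration (the real helper lives in the custommetrics package; the definition.json file name comes from the comment in the previous hunk):

package main

import (
	"fmt"
	"path"

	"k8s.io/kubernetes/pkg/api/v1"
)

const customMetricsDefinitionDir = "/etc/custom-metrics"

// definitionPath returns the expected definition file if the container
// mounts the custom-metrics directory, mirroring the documented behaviour.
func definitionPath(container *v1.Container) (string, bool) {
	for _, m := range container.VolumeMounts {
		if m.MountPath == customMetricsDefinitionDir {
			return path.Join(m.MountPath, "definition.json"), true
		}
	}
	return "", false
}

func main() {
	cm := &v1.Container{
		Name:         "test_container",
		VolumeMounts: []v1.VolumeMount{{Name: "cm", MountPath: customMetricsDefinitionDir}},
	}
	fmt.Println(definitionPath(cm))                           // /etc/custom-metrics/definition.json true
	fmt.Println(definitionPath(&v1.Container{Name: "plain"})) // (empty) false
}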

View File

@ -20,6 +20,7 @@ package cm
import (
"fmt"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
)

View File

@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Docker integration using pkg/kubelet/api/v1alpha1/runtime/api.pb.go.
// Docker integration using pkg/kubelet/api/v1alpha1/runtime/v1.pb.go.
package dockershim

View File

@ -305,7 +305,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeApi.PodSandboxConfig,
labels := makeLabels(c.GetLabels(), c.GetAnnotations())
// Apply a label to distinguish sandboxes from regular containers.
labels[containerTypeLabelKey] = containerTypeLabelSandbox
// Apply a container name label for infra container. This is used in summary api.
// Apply a container name label for infra container. This is used in summary v1.
// TODO(random-liu): Deprecate this label once container metrics is directly got from CRI.
labels[types.KubernetesContainerNameLabel] = sandboxContainerName

View File

@ -28,7 +28,7 @@ import (
dockernat "github.com/docker/go-connections/nat"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/kubelet/types"
@ -235,7 +235,7 @@ func getNetworkNamespace(c *dockertypes.ContainerJSON) string {
func getSysctlsFromAnnotations(annotations map[string]string) (map[string]string, error) {
var results map[string]string
sysctls, unsafeSysctls, err := api.SysctlsFromPodAnnotations(annotations)
sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(annotations)
if err != nil {
return nil, err
}
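
The annotation keys used above come straight from the v1 package; a minimal sketch of feeding them in (only the counts are checked, on the assumption that the helper returns slices of parsed entries plus an error, as its call sites here suggest):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	annotations := map[string]string{
		v1.SysctlsPodAnnotationKey:       "kernel.shmmni=32768,kernel.shmmax=1000000000",
		v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
	}

	sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(annotations)
	if err != nil {
		panic(err)
	}
	// Per the test table in the next file: two safe sysctls, one unsafe.
	fmt.Println(len(sysctls), len(unsafeSysctls)) // 2 1
}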

View File

@ -22,7 +22,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/security/apparmor"
)
@ -58,19 +58,19 @@ func TestGetContainerSecurityOpts(t *testing.T) {
}, {
msg: "Seccomp unconfined",
config: makeConfig(map[string]string{
api.SeccompContainerAnnotationKeyPrefix + containerName: "unconfined",
v1.SeccompContainerAnnotationKeyPrefix + containerName: "unconfined",
}),
expectedOpts: []string{"seccomp=unconfined"},
}, {
msg: "Seccomp default",
config: makeConfig(map[string]string{
api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
}),
expectedOpts: nil,
}, {
msg: "Seccomp pod default",
config: makeConfig(map[string]string{
api.SeccompPodAnnotationKey: "docker/default",
v1.SeccompPodAnnotationKey: "docker/default",
}),
expectedOpts: nil,
}, {
@ -88,7 +88,7 @@ func TestGetContainerSecurityOpts(t *testing.T) {
}, {
msg: "AppArmor and seccomp profile",
config: makeConfig(map[string]string{
api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
}),
expectedOpts: []string{"apparmor=foo"},
@ -121,20 +121,20 @@ func TestGetSandboxSecurityOpts(t *testing.T) {
}, {
msg: "Seccomp default",
config: makeConfig(map[string]string{
api.SeccompPodAnnotationKey: "docker/default",
v1.SeccompPodAnnotationKey: "docker/default",
}),
expectedOpts: nil,
}, {
msg: "Seccomp unconfined",
config: makeConfig(map[string]string{
api.SeccompPodAnnotationKey: "unconfined",
v1.SeccompPodAnnotationKey: "unconfined",
}),
expectedOpts: []string{"seccomp=unconfined"},
}, {
msg: "Seccomp pod and container profile",
config: makeConfig(map[string]string{
api.SeccompContainerAnnotationKeyPrefix + "test-container": "unconfined",
api.SeccompPodAnnotationKey: "docker/default",
v1.SeccompContainerAnnotationKeyPrefix + "test-container": "unconfined",
v1.SeccompPodAnnotationKey: "docker/default",
}),
expectedOpts: nil,
}}
@ -156,8 +156,8 @@ func TestGetSystclsFromAnnotations(t *testing.T) {
expectedSysctls map[string]string
}{{
annotations: map[string]string{
api.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
api.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
},
expectedSysctls: map[string]string{
"kernel.shmmni": "32768",
@ -166,7 +166,7 @@ func TestGetSystclsFromAnnotations(t *testing.T) {
},
}, {
annotations: map[string]string{
api.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
v1.SysctlsPodAnnotationKey: "kernel.shmmni=32768,kernel.shmmax=1000000000",
},
expectedSysctls: map[string]string{
"kernel.shmmni": "32768",
@ -174,7 +174,7 @@ func TestGetSystclsFromAnnotations(t *testing.T) {
},
}, {
annotations: map[string]string{
api.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
v1.UnsafeSysctlsPodAnnotationKey: "knet.ipv4.route.min_pmtu=1000",
},
expectedSysctls: map[string]string{
"knet.ipv4.route.min_pmtu": "1000",

View File

@ -22,7 +22,7 @@ import (
dockercontainer "github.com/docker/engine-api/types/container"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/securitycontext"
)
@ -100,7 +100,7 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, sandboxID st
if sc.SelinuxOptions != nil {
hostConfig.SecurityOpt = securitycontext.ModifySecurityOptions(
hostConfig.SecurityOpt,
&api.SELinuxOptions{
&v1.SELinuxOptions{
User: sc.SelinuxOptions.GetUser(),
Role: sc.SelinuxOptions.GetRole(),
Type: sc.SelinuxOptions.GetType(),

View File

@ -24,7 +24,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
)
@ -65,8 +65,8 @@ func makeUndefinedContainer(id string, running bool, created time.Time) *FakeCon
func addPods(podGetter podGetter, podUIDs ...types.UID) {
fakePodGetter := podGetter.(*fakePodGetter)
for _, uid := range podUIDs {
fakePodGetter.pods[uid] = &api.Pod{
ObjectMeta: api.ObjectMeta{
fakePodGetter.pods[uid] = &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod" + string(uid),
Namespace: "test",
UID: uid,

View File

@ -31,7 +31,7 @@ import (
dockerapi "github.com/docker/engine-api/client"
dockertypes "github.com/docker/engine-api/types"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/images"
@ -93,7 +93,7 @@ func SetContainerNamePrefix(prefix string) {
// DockerPuller is an abstract interface for testability. It abstracts image pull operations.
type DockerPuller interface {
Pull(image string, secrets []api.Secret) error
Pull(image string, secrets []v1.Secret) error
IsImagePresent(image string) (bool, error)
}
@ -225,7 +225,7 @@ func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool {
return false
}
func (p dockerPuller) Pull(image string, secrets []api.Secret) error {
func (p dockerPuller) Pull(image string, secrets []v1.Secret) error {
keyring, err := credentialprovider.MakeDockerKeyring(secrets, p.keyring)
if err != nil {
return err
@ -293,7 +293,7 @@ func (p dockerPuller) IsImagePresent(image string) (bool, error) {
// Although rand.Uint32() is not really unique, it's enough for us because an error will
// only occur when instances of the same container in the same pod have the same UID. The
// chance is really slim.
func BuildDockerName(dockerName KubeletContainerName, container *api.Container) (string, string, string) {
func BuildDockerName(dockerName KubeletContainerName, container *v1.Container) (string, string, string) {
containerName := dockerName.ContainerName + "." + strconv.FormatUint(kubecontainer.HashContainer(container), 16)
stableName := fmt.Sprintf("%s_%s_%s_%s",
containerNamePrefix,

View File

@ -45,6 +45,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -113,7 +114,7 @@ var (
_ kubecontainer.DirectStreamingRuntime = &DockerManager{}
// TODO: make this a TTL based pull (if image older than X policy, pull)
podInfraContainerImagePullPolicy = api.PullIfNotPresent
podInfraContainerImagePullPolicy = v1.PullIfNotPresent
// Default set of seccomp security options.
defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined", ""}}
@ -129,7 +130,7 @@ type DockerManager struct {
// The image name of the pod infra container.
podInfraContainerImage string
// (Optional) Additional environment variables to be set for the pod infra container.
podInfraContainerEnv []api.EnvVar
podInfraContainerEnv []v1.EnvVar
// TODO(yifan): Record the pull failure so we can eliminate the image checking?
// Lower level docker image puller.
@ -194,14 +195,14 @@ type DockerManager struct {
// A subset of the pod.Manager interface extracted for testing purposes.
type podGetter interface {
GetPodByUID(kubetypes.UID) (*api.Pod, bool)
GetPodByUID(kubetypes.UID) (*v1.Pod, bool)
}
func PodInfraContainerEnv(env map[string]string) kubecontainer.Option {
return func(rt kubecontainer.Runtime) {
dm := rt.(*DockerManager)
for k, v := range env {
dm.podInfraContainerEnv = append(dm.podInfraContainerEnv, api.EnvVar{
dm.podInfraContainerEnv = append(dm.podInfraContainerEnv, v1.EnvVar{
Name: k,
Value: v,
})
@ -308,7 +309,7 @@ func NewDockerManager(
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
// "100" or "all") to tail the log.
// TODO: Make 'RawTerminal' option flagable.
func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error {
func (dm *DockerManager) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
container, err := dm.client.InspectContainer(containerID.ID)
if err != nil {
return err
@ -318,7 +319,7 @@ func (dm *DockerManager) GetContainerLogs(pod *api.Pod, containerID kubecontaine
// Temporarily export this function to share with dockershim.
// TODO: clean this up.
func GetContainerLogs(client DockerInterface, pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer, rawTerm bool) error {
func GetContainerLogs(client DockerInterface, pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer, rawTerm bool) error {
var since int64
if logOptions.SinceSeconds != nil {
t := unversioned.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second)
@ -584,10 +585,10 @@ func makePortsAndBindings(portMappings []kubecontainer.PortMapping) (map[dockern
}
func (dm *DockerManager) runContainer(
pod *api.Pod,
container *api.Container,
pod *v1.Pod,
container *v1.Container,
opts *kubecontainer.RunContainerOptions,
ref *api.ObjectReference,
ref *v1.ObjectReference,
netMode string,
ipcMode string,
utsMode string,
@ -620,7 +621,7 @@ func (dm *DockerManager) runContainer(
// TODO: This is kind of hacky, we should really just encode the bits we need.
// TODO: This is hacky because the Kubelet should be parameterized to encode a specific version
// and needs to be able to migrate this whenever we deprecate v1. Should be a member of DockerManager.
if data, err := kruntime.Encode(api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}), pod); err == nil {
if data, err := kruntime.Encode(api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"}), pod); err == nil {
labels[kubernetesPodLabel] = string(data)
} else {
glog.Errorf("Failed to encode pod: %s for prestop hook", pod.Name)
@ -711,9 +712,9 @@ func (dm *DockerManager) runContainer(
// Set sysctls if requested
if container.Name == PodInfraContainerName {
sysctls, unsafeSysctls, err := api.SysctlsFromPodAnnotations(pod.Annotations)
sysctls, unsafeSysctls, err := v1.SysctlsFromPodAnnotations(pod.Annotations)
if err != nil {
dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err)
dm.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err)
return kubecontainer.ContainerID{}, err
}
if len(sysctls)+len(unsafeSysctls) > 0 {
@ -789,7 +790,7 @@ func (dm *DockerManager) runContainer(
securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig, supplementalGids)
createResp, err := dm.client.CreateContainer(dockerOpts)
if err != nil {
dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err)
dm.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create docker container %q of pod %q with error: %v", container.Name, format.Pod(pod), err)
return kubecontainer.ContainerID{}, err
}
if len(createResp.Warnings) != 0 {
@ -808,21 +809,21 @@ func (dm *DockerManager) runContainer(
}
createdEventMsg = fmt.Sprintf("%s; Security:[%s]", createdEventMsg, strings.Join(msgs, " "))
}
dm.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, createdEventMsg)
dm.recorder.Eventf(ref, v1.EventTypeNormal, events.CreatedContainer, createdEventMsg)
if err = dm.client.StartContainer(createResp.ID); err != nil {
dm.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer,
dm.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer,
"Failed to start container with docker id %v with error: %v", utilstrings.ShortenString(createResp.ID, 12), err)
return kubecontainer.ContainerID{}, err
}
dm.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12))
dm.recorder.Eventf(ref, v1.EventTypeNormal, events.StartedContainer, "Started container with docker id %v", utilstrings.ShortenString(createResp.ID, 12))
return kubecontainer.DockerID(createResp.ID).ContainerID(), nil
}
// setInfraContainerNetworkConfig sets the network configuration for the infra container. We only set the network
// configuration for the infra container; all user containers share its network namespace.
func setInfraContainerNetworkConfig(pod *api.Pod, netMode string, opts *kubecontainer.RunContainerOptions, dockerOpts *dockertypes.ContainerCreateConfig) {
func setInfraContainerNetworkConfig(pod *v1.Pod, netMode string, opts *kubecontainer.RunContainerOptions, dockerOpts *dockertypes.ContainerCreateConfig) {
exposedPorts, portBindings := makePortsAndBindings(opts.PortMappings)
dockerOpts.Config.ExposedPorts = exposedPorts
dockerOpts.HostConfig.PortBindings = dockernat.PortMap(portBindings)
@ -838,7 +839,7 @@ func setInfraContainerNetworkConfig(pod *api.Pod, netMode string, opts *kubecont
}
}
func setEntrypointAndCommand(container *api.Container, opts *kubecontainer.RunContainerOptions, dockerOpts dockertypes.ContainerCreateConfig) {
func setEntrypointAndCommand(container *v1.Container, opts *kubecontainer.RunContainerOptions, dockerOpts dockertypes.ContainerCreateConfig) {
command, args := kubecontainer.ExpandContainerCommandAndArgs(container, opts.Envs)
dockerOpts.Config.Entrypoint = dockerstrslice.StrSlice(command)
@ -957,7 +958,7 @@ func (dm *DockerManager) ListImages() ([]kubecontainer.Image, error) {
}
// PullImage pulls an image from network to local storage.
func (dm *DockerManager) PullImage(image kubecontainer.ImageSpec, secrets []api.Secret) error {
func (dm *DockerManager) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) error {
return dm.dockerPuller.Pull(image.Image, secrets)
}
@ -983,8 +984,8 @@ func (dm *DockerManager) RemoveImage(image kubecontainer.ImageSpec) error {
}
// podInfraContainerChanged returns true if the pod infra container has changed.
func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) {
var ports []api.ContainerPort
func (dm *DockerManager) podInfraContainerChanged(pod *v1.Pod, podInfraContainerStatus *kubecontainer.ContainerStatus) (bool, error) {
var ports []v1.ContainerPort
// Check network mode.
if kubecontainer.IsHostNetworkPod(pod) {
@ -995,7 +996,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine
networkMode := getDockerNetworkMode(dockerPodInfraContainer)
if networkMode != namespaceModeHost {
glog.V(4).Infof("host: %v, %v", pod.Spec.SecurityContext.HostNetwork, networkMode)
glog.V(4).Infof("host: %v, %v", pod.Spec.HostNetwork, networkMode)
return true, nil
}
} else if dm.networkPlugin.Name() != "cni" && dm.networkPlugin.Name() != "kubenet" {
@ -1008,7 +1009,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine
ports = append(ports, container.Ports...)
}
}
expectedPodInfraContainer := &api.Container{
expectedPodInfraContainer := &v1.Container{
Name: PodInfraContainerName,
Image: dm.podInfraContainerImage,
Ports: ports,
@ -1019,7 +1020,7 @@ func (dm *DockerManager) podInfraContainerChanged(pod *api.Pod, podInfraContaine
}
// determine if the container root should be a read only filesystem.
func readOnlyRootFilesystem(container *api.Container) bool {
func readOnlyRootFilesystem(container *v1.Container) bool {
return container.SecurityContext != nil && container.SecurityContext.ReadOnlyRootFilesystem != nil && *container.SecurityContext.ReadOnlyRootFilesystem
}
@ -1171,7 +1172,7 @@ func (d dockerOpt) GetKV() (string, string) {
}
// Get the docker security options for seccomp.
func (dm *DockerManager) getSeccompOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
func (dm *DockerManager) getSeccompOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
version, err := dm.APIVersion()
if err != nil {
return nil, err
@ -1190,10 +1191,10 @@ func (dm *DockerManager) getSeccompOpts(pod *api.Pod, ctrName string) ([]dockerO
// Temporarily export this function to share with dockershim.
// TODO: clean this up.
func GetSeccompOpts(annotations map[string]string, ctrName, profileRoot string) ([]dockerOpt, error) {
profile, profileOK := annotations[api.SeccompContainerAnnotationKeyPrefix+ctrName]
profile, profileOK := annotations[v1.SeccompContainerAnnotationKeyPrefix+ctrName]
if !profileOK {
// try the pod profile
profile, profileOK = annotations[api.SeccompPodAnnotationKey]
profile, profileOK = annotations[v1.SeccompPodAnnotationKey]
if !profileOK {
// return early the default
return defaultSeccompOpt, nil
@ -1232,7 +1233,7 @@ func GetSeccompOpts(annotations map[string]string, ctrName, profileRoot string)
}
// Get the docker security options for AppArmor.
func (dm *DockerManager) getAppArmorOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
func (dm *DockerManager) getAppArmorOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
return GetAppArmorOpts(pod.Annotations, ctrName)
}
@ -1406,13 +1407,13 @@ func PortForward(client DockerInterface, podInfraContainerID string, port uint16
// TODO(random-liu): After using pod status for KillPod(), we can also remove the kubernetesPodLabel, because all the needed information should have
// been extract from new labels and stored in pod status.
// only hard eviction scenarios should provide a grace period override, all other code paths must pass nil.
func (dm *DockerManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
func (dm *DockerManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
result := dm.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
return result.Error()
}
// NOTE(random-liu): The pod passed in could be *nil* when kubelet restarted.
func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
func (dm *DockerManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
// Short circuit if there's nothing to kill.
if len(runningPod.Containers) == 0 {
return
@ -1423,7 +1424,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
wg := sync.WaitGroup{}
var (
networkContainer *kubecontainer.Container
networkSpec *api.Container
networkSpec *v1.Container
)
wg.Add(len(runningPod.Containers))
for _, container := range runningPod.Containers {
@ -1431,7 +1432,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
defer utilruntime.HandleCrash()
defer wg.Done()
var containerSpec *api.Container
var containerSpec *v1.Container
if pod != nil {
for i, c := range pod.Spec.Containers {
if c.Name == container.Name {
@ -1503,7 +1504,7 @@ func (dm *DockerManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecont
// KillContainerInPod kills a container in the pod. It must be passed either a container ID or a container and pod,
// and will attempt to lookup the other information if missing.
func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, message string, gracePeriodOverride *int64) error {
func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerID, container *v1.Container, pod *v1.Pod, message string, gracePeriodOverride *int64) error {
switch {
case containerID.IsEmpty():
// Locate the container.
@ -1542,7 +1543,7 @@ func (dm *DockerManager) KillContainerInPod(containerID kubecontainer.ContainerI
// KillContainerInPod if information must be retrieved first. It is only valid to provide a grace period override
// during hard eviction scenarios. All other code paths in kubelet must never provide a grace period override otherwise
// data corruption could occur in the end-user application.
func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *api.Container, pod *api.Pod, reason string, gracePeriodOverride *int64) error {
func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, container *v1.Container, pod *v1.Pod, reason string, gracePeriodOverride *int64) error {
ID := containerID.ID
name := ID
if container != nil {
@ -1614,7 +1615,7 @@ func (dm *DockerManager) killContainer(containerID kubecontainer.ContainerID, co
if reason != "" {
message = fmt.Sprint(message, ": ", reason)
}
dm.recorder.Event(ref, api.EventTypeNormal, events.KillingContainer, message)
dm.recorder.Event(ref, v1.EventTypeNormal, events.KillingContainer, message)
dm.containerRefManager.ClearRef(containerID)
}
return err
@ -1626,13 +1627,13 @@ func (dm *DockerManager) generateFailedContainerEvent(containerID kubecontainer.
glog.Warningf("No ref for pod '%q'", podName)
return
}
dm.recorder.Event(ref, api.EventTypeWarning, reason, message)
dm.recorder.Event(ref, v1.EventTypeWarning, reason, message)
}
var errNoPodOnContainer = fmt.Errorf("no pod information labels on Docker container")
// containerAndPodFromLabels tries to load the appropriate container info off of a Docker container's labels
func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod, container *api.Container, err error) {
func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *v1.Pod, container *v1.Container, err error) {
if inspect == nil || inspect.Config == nil || inspect.Config.Labels == nil {
return nil, nil, errNoPodOnContainer
}
@ -1640,7 +1641,7 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod
// the pod data may not be set
if body, found := labels[kubernetesPodLabel]; found {
pod = &api.Pod{}
pod = &v1.Pod{}
if err = kruntime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(body), pod); err == nil {
name := labels[types.KubernetesContainerNameLabel]
for ix := range pod.Spec.Containers {
@ -1670,7 +1671,7 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod
if pod == nil {
if period, ok := labels[kubernetesPodTerminationGracePeriodLabel]; ok {
if seconds, err := strconv.ParseInt(period, 10, 64); err == nil {
pod = &api.Pod{}
pod = &v1.Pod{}
pod.DeletionGracePeriodSeconds = &seconds
}
}
@ -1679,7 +1680,7 @@ func containerAndPodFromLabels(inspect *dockertypes.ContainerJSON) (pod *api.Pod
return
}
func (dm *DockerManager) applyOOMScoreAdj(pod *api.Pod, container *api.Container, containerInfo *dockertypes.ContainerJSON) error {
func (dm *DockerManager) applyOOMScoreAdj(pod *v1.Pod, container *v1.Container, containerInfo *dockertypes.ContainerJSON) error {
if containerInfo.State.Pid == 0 {
// Container exited. We cannot do anything about it. Ignore this error.
glog.V(2).Infof("Failed to apply OOM score adj on container %q with ID %q. Init process does not exist.", containerInfo.Name, containerInfo.ID)
@ -1709,7 +1710,7 @@ func (dm *DockerManager) applyOOMScoreAdj(pod *api.Pod, container *api.Container
// Run a single container from a pod. Returns the docker container ID
// If do not need to pass labels, just pass nil.
func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Container, netMode, ipcMode, pidMode, podIP string, restartCount int) (kubecontainer.ContainerID, error) {
func (dm *DockerManager) runContainerInPod(pod *v1.Pod, container *v1.Container, netMode, ipcMode, pidMode, podIP string, restartCount int) (kubecontainer.ContainerID, error) {
start := time.Now()
defer func() {
metrics.ContainerManagerLatency.WithLabelValues("runContainerInPod").Observe(metrics.SinceInMicroseconds(start))
@ -1790,7 +1791,7 @@ func (dm *DockerManager) runContainerInPod(pod *api.Pod, container *api.Containe
return id, err
}
func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *api.Pod, container *api.Container, containerInfo *dockertypes.ContainerJSON) error {
func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *v1.Pod, container *v1.Container, containerInfo *dockertypes.ContainerJSON) error {
// Compare current API version with expected api version.
result, err := dm.checkDockerAPIVersion(dockerV110APIVersion)
if err != nil {
@ -1806,7 +1807,7 @@ func (dm *DockerManager) applyOOMScoreAdjIfNeeded(pod *api.Pod, container *api.C
return nil
}
func (dm *DockerManager) calculateOomScoreAdj(pod *api.Pod, container *api.Container) int {
func (dm *DockerManager) calculateOomScoreAdj(pod *v1.Pod, container *v1.Container) int {
// Set OOM score of the container based on the priority of the container.
// Processes in lower-priority pods should be killed first if the system runs out of memory.
// The main pod infrastructure container is considered high priority, since if it is killed the
@ -1880,14 +1881,14 @@ func appendToFile(filePath, stringToAppend string) error {
// createPodInfraContainer starts the pod infra container for a pod. Returns the docker container ID of the newly created container.
// If any error occurs in this function, it will return a brief error and a detailed error message.
func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.DockerID, error, string) {
func (dm *DockerManager) createPodInfraContainer(pod *v1.Pod) (kubecontainer.DockerID, error, string) {
start := time.Now()
defer func() {
metrics.ContainerManagerLatency.WithLabelValues("createPodInfraContainer").Observe(metrics.SinceInMicroseconds(start))
}()
// Use host networking if specified.
netNamespace := ""
var ports []api.ContainerPort
var ports []v1.ContainerPort
if kubecontainer.IsHostNetworkPod(pod) {
netNamespace = namespaceModeHost
@ -1904,7 +1905,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do
}
}
container := &api.Container{
container := &v1.Container{
Name: PodInfraContainerName,
Image: dm.podInfraContainerImage,
Ports: ports,
@ -1948,7 +1949,7 @@ type podContainerChangesSpec struct {
ContainersToKeep map[kubecontainer.DockerID]int
}
func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) {
func (dm *DockerManager) computePodContainerChanges(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (podContainerChangesSpec, error) {
start := time.Now()
defer func() {
metrics.ContainerManagerLatency.WithLabelValues("computePodContainerChanges").Observe(metrics.SinceInMicroseconds(start))
@ -2031,7 +2032,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
// If we're creating infra container everything will be killed anyway
// If RestartPolicy is Always or OnFailure we restart containers that were running before we
// killed them when restarting Infra Container.
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
message := fmt.Sprintf("Infra Container is being recreated. %q will be restarted.", container.Name)
glog.V(1).Info(message)
containersToStart[index] = message
@ -2044,7 +2045,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
// If we have an initialization failure everything will be killed anyway
// If RestartPolicy is Always or OnFailure we restart containers that were running before we
// killed them when re-running initialization
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
glog.V(1).Info(message)
containersToStart[index] = message
@ -2069,7 +2070,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
containersToKeep[containerID] = index
continue
}
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
glog.Info(message)
containersToStart[index] = message
@ -2100,7 +2101,7 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
}
// Sync the running pod to match the specified desired pod.
func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
func (dm *DockerManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
start := time.Now()
defer func() {
metrics.ContainerManagerLatency.WithLabelValues("SyncPod").Observe(metrics.SinceInMicroseconds(start))
@ -2114,7 +2115,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
glog.V(3).Infof("Got container changes for pod %q: %+v", format.Pod(pod), containerChanges)
if containerChanges.InfraChanged {
dm.recorder.Eventf(pod, api.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
dm.recorder.Eventf(pod, v1.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
}
if containerChanges.StartInfraContainer || (len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0) {
if len(containerChanges.ContainersToKeep) == 0 && len(containerChanges.ContainersToStart) == 0 {
@ -2139,7 +2140,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
if !keep && !keepInit {
glog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerStatus.Name, containerStatus.ID, format.Pod(pod))
// attempt to find the appropriate container policy
var podContainer *api.Container
var podContainer *v1.Container
var killMessage string
for i, c := range pod.Spec.Containers {
if c.Name == containerStatus.Name {
@ -2244,7 +2245,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
result.AddSyncResult(initContainerResult)
if pod.Spec.RestartPolicy == api.RestartPolicyNever {
if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
return
}
@ -2330,7 +2331,7 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
// tryContainerStart attempts to pull and start the container, returning an error and a reason string if the start
// was not successful.
func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) {
func (dm *DockerManager) tryContainerStart(container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, namespaceMode, pidMode, podIP string) (err error, reason string) {
err, msg := dm.imagePuller.EnsureImageExists(pod, container, pullSecrets)
if err != nil {
return err, msg
@ -2368,7 +2369,7 @@ func (dm *DockerManager) tryContainerStart(container *api.Container, pod *api.Po
// pruneInitContainers ensures that before we begin creating init containers, we have reduced the number
// of outstanding init containers still present. This reduces load on the container garbage collector
// by only preserving the most recent terminated init container.
func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) {
func (dm *DockerManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.DockerID]int) {
// only the last execution of each init container should be preserved, and only preserve it if it is in the
// list of init containers to keep.
initContainerNames := sets.NewString()
@ -2417,7 +2418,7 @@ func (dm *DockerManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus
// findActiveInitContainer returns the status of the last failed container, the next init container to
// start, or done if there are no further init containers. Status is only returned if an init container
// failed, in which case next will point to the current container.
func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) (next *api.Container, status *kubecontainer.ContainerStatus, done bool) {
func findActiveInitContainer(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (next *v1.Container, status *kubecontainer.ContainerStatus, done bool) {
if len(pod.Spec.InitContainers) == 0 {
return nil, nil, true
}
@ -2449,7 +2450,7 @@ func findActiveInitContainer(pod *api.Pod, podStatus *kubecontainer.PodStatus) (
}
// verifyNonRoot returns an error if the container or image will run as the root user.
func (dm *DockerManager) verifyNonRoot(container *api.Container) error {
func (dm *DockerManager) verifyNonRoot(container *v1.Container) error {
if securitycontext.HasRunAsUser(container) {
if securitycontext.HasRootRunAsUser(container) {
return fmt.Errorf("container's runAsUser breaks non-root policy")
@ -2510,7 +2511,7 @@ func GetUserFromImageUser(id string) string {
// If all instances of a container are garbage collected, doBackOff will also return false, which means the container may be restarted before the
// backoff deadline. However, because that won't cause error and the chance is really slim, we can just ignore it for now.
// If a container is still in backoff, the function will return a brief backoff error and a detailed error message.
func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, error, string) {
func (dm *DockerManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, error, string) {
var cStatus *kubecontainer.ContainerStatus
// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
// TODO(random-liu): Better define backoff start point; add unit and e2e test after we finalize this. (See github issue #22240)
@ -2532,7 +2533,7 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
stableName, _, _ := BuildDockerName(dockerName, container)
if backOff.IsInBackOffSince(stableName, ts) {
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
dm.recorder.Eventf(ref, api.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed docker container")
dm.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed docker container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, format.Pod(pod))
glog.Infof("%s", err.Error())
@ -2544,18 +2545,18 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
}
// getPidMode returns the pid mode to use on the docker container based on pod.Spec.HostPID.
func getPidMode(pod *api.Pod) string {
func getPidMode(pod *v1.Pod) string {
pidMode := ""
if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostPID {
if pod.Spec.HostPID {
pidMode = namespaceModeHost
}
return pidMode
}
// getIPCMode returns the ipc mode to use on the docker container based on pod.Spec.HostIPC.
func getIPCMode(pod *api.Pod) string {
func getIPCMode(pod *v1.Pod) string {
ipcMode := ""
if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostIPC {
if pod.Spec.HostIPC {
ipcMode = namespaceModeHost
}
return ipcMode
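
For orientation, the hunks above flatten the host-namespace flags: with the v1 types, HostPID, HostIPC, and HostNetwork are read directly off PodSpec instead of off PodSpec.SecurityContext. A minimal sketch of the resulting check, assuming only the v1 package imported in this diff (the helper name and main function are illustrative, not part of the commit):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// pidMode mirrors the reworked getPidMode above: the HostPID flag is read
// straight off PodSpec rather than off an optional SecurityContext.
func pidMode(pod *v1.Pod) string {
	if pod.Spec.HostPID {
		return "host" // the namespaceModeHost value used by the package
	}
	return ""
}

func main() {
	pod := &v1.Pod{}
	pod.Spec.HostPID = true
	fmt.Println(pidMode(pod)) // prints "host"
}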

@ -20,7 +20,7 @@ package dockertools
import (
dockertypes "github.com/docker/engine-api/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
func getContainerIP(container *dockertypes.ContainerJSON) string {
@ -45,7 +45,7 @@ func containerProvidesPodIP(name *KubeletContainerName) bool {
}
// Returns Seccomp and AppArmor Security options
func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
var securityOpts []dockerOpt
if seccompOpts, err := dm.getSeccompOpts(pod, ctrName); err != nil {
return nil, err

@ -38,8 +38,8 @@ import (
"github.com/golang/mock/gomock"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -80,7 +80,7 @@ var _ kubecontainer.RuntimeHelper = &fakeRuntimeHelper{}
var testPodContainerDir string
func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var opts kubecontainer.RunContainerOptions
var err error
if len(container.TerminationMessagePath) != 0 {
@ -93,12 +93,12 @@ func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container
return &opts, nil
}
func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
func (f *fakeRuntimeHelper) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) {
return nil, nil, fmt.Errorf("not implemented")
}
// This is not used by docker runtime.
func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) {
func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
return "", "", nil
}
@ -106,7 +106,7 @@ func (f *fakeRuntimeHelper) GetPodDir(kubetypes.UID) string {
return ""
}
func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 {
func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return nil
}
@ -116,7 +116,7 @@ func newFakeImageManager() images.ImageManager {
return &fakeImageManager{}
}
func (m *fakeImageManager) EnsureImageExists(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) {
func (m *fakeImageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string) {
return nil, ""
}
@ -187,20 +187,20 @@ func matchString(t *testing.T, pattern, str string) bool {
func TestSetEntrypointAndCommand(t *testing.T) {
cases := []struct {
name string
container *api.Container
container *v1.Container
envs []kubecontainer.EnvVar
expected *dockertypes.ContainerCreateConfig
}{
{
name: "none",
container: &api.Container{},
container: &v1.Container{},
expected: &dockertypes.ContainerCreateConfig{
Config: &dockercontainer.Config{},
},
},
{
name: "command",
container: &api.Container{
container: &v1.Container{
Command: []string{"foo", "bar"},
},
expected: &dockertypes.ContainerCreateConfig{
@ -211,7 +211,7 @@ func TestSetEntrypointAndCommand(t *testing.T) {
},
{
name: "command expanded",
container: &api.Container{
container: &v1.Container{
Command: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"},
},
envs: []kubecontainer.EnvVar{
@ -232,7 +232,7 @@ func TestSetEntrypointAndCommand(t *testing.T) {
},
{
name: "args",
container: &api.Container{
container: &v1.Container{
Args: []string{"foo", "bar"},
},
expected: &dockertypes.ContainerCreateConfig{
@ -243,7 +243,7 @@ func TestSetEntrypointAndCommand(t *testing.T) {
},
{
name: "args expanded",
container: &api.Container{
container: &v1.Container{
Args: []string{"zap", "$(VAR_TEST)", "$(VAR_TEST2)"},
},
envs: []kubecontainer.EnvVar{
@ -264,7 +264,7 @@ func TestSetEntrypointAndCommand(t *testing.T) {
},
{
name: "both",
container: &api.Container{
container: &v1.Container{
Command: []string{"foo"},
Args: []string{"bar", "baz"},
},
@ -277,7 +277,7 @@ func TestSetEntrypointAndCommand(t *testing.T) {
},
{
name: "both expanded",
container: &api.Container{
container: &v1.Container{
Command: []string{"$(VAR_TEST2)--$(VAR_TEST)", "foo", "$(VAR_TEST3)"},
Args: []string{"foo", "$(VAR_TEST)", "$(VAR_TEST2)"},
},
@ -314,10 +314,10 @@ func TestSetEntrypointAndCommand(t *testing.T) {
}
setEntrypointAndCommand(tc.container, opts, actualOpts)
if e, a := tc.expected.Config.Entrypoint, actualOpts.Config.Entrypoint; !api.Semantic.DeepEqual(e, a) {
if e, a := tc.expected.Config.Entrypoint, actualOpts.Config.Entrypoint; !v1.Semantic.DeepEqual(e, a) {
t.Errorf("%v: unexpected entrypoint: expected %v, got %v", tc.name, e, a)
}
if e, a := tc.expected.Config.Cmd, actualOpts.Config.Cmd; !api.Semantic.DeepEqual(e, a) {
if e, a := tc.expected.Config.Cmd, actualOpts.Config.Cmd; !v1.Semantic.DeepEqual(e, a) {
t.Errorf("%v: unexpected command: expected %v, got %v", tc.name, e, a)
}
}
@ -477,13 +477,13 @@ func TestKillContainerInPodWithPreStop(t *testing.T) {
ExitCode: 0,
}
expectedCmd := []string{"foo.sh", "bar"}
pod := makePod("qux", &api.PodSpec{
Containers: []api.Container{
pod := makePod("qux", &v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Lifecycle: &api.Lifecycle{
PreStop: &api.Handler{
Exec: &api.ExecAction{
Lifecycle: &v1.Lifecycle{
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: expectedCmd,
},
},
@ -558,15 +558,15 @@ func TestIsAExitError(t *testing.T) {
}
}
func generatePodInfraContainerHash(pod *api.Pod) uint64 {
var ports []api.ContainerPort
if pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork {
func generatePodInfraContainerHash(pod *v1.Pod) uint64 {
var ports []v1.ContainerPort
if pod.Spec.SecurityContext == nil || !pod.Spec.HostNetwork {
for _, container := range pod.Spec.Containers {
ports = append(ports, container.Ports...)
}
}
container := &api.Container{
container := &v1.Container{
Name: PodInfraContainerName,
Image: "",
Ports: ports,
@ -577,7 +577,7 @@ func generatePodInfraContainerHash(pod *api.Pod) uint64 {
// runSyncPod is a helper function to retrieve the running pods from the fake
// docker client and runs SyncPod for the given pod.
func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod, backOff *flowcontrol.Backoff, expectErr bool) kubecontainer.PodSyncResult {
func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *v1.Pod, backOff *flowcontrol.Backoff, expectErr bool) kubecontainer.PodSyncResult {
podStatus, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
if err != nil {
t.Errorf("unexpected error: %v", err)
@ -586,8 +586,8 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p
if backOff == nil {
backOff = flowcontrol.NewBackOff(time.Second, time.Minute)
}
// api.PodStatus is not used in SyncPod now, pass in an empty one.
result := dm.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
// v1.PodStatus is not used in SyncPod now, pass in an empty one.
result := dm.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
err = result.Error()
if err != nil && !expectErr {
t.Errorf("unexpected error: %v", err)
@ -601,8 +601,8 @@ func TestSyncPodCreateNetAndContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
dm.podInfraContainerImage = "pod_infra_image"
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
@ -641,8 +641,8 @@ func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) {
puller := dm.dockerPuller.(*FakeDockerPuller)
puller.HasImages = []string{}
dm.podInfraContainerImage = "foo/infra_image:v1"
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar", Image: "foo/something:v0", ImagePullPolicy: "IfNotPresent"},
},
})
@ -672,8 +672,8 @@ func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) {
func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
@ -700,8 +700,8 @@ func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) {
func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := makePod("foo1", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo1", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar1"},
},
})
@ -735,8 +735,8 @@ func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) {
func TestSyncPodDeletesDuplicate(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := makePod("bar", &api.PodSpec{
Containers: []api.Container{
pod := makePod("bar", &v1.PodSpec{
Containers: []v1.Container{
{Name: "foo"},
},
})
@ -769,8 +769,8 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
func TestSyncPodBadHash(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
@ -802,8 +802,8 @@ func TestSyncPodsUnhealthy(t *testing.T) {
infraContainerID = "9876"
)
dm, fakeDocker := newTestDockerManager()
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{{Name: "unhealthy"}},
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{{Name: "unhealthy"}},
})
fakeDocker.SetFakeRunningContainers([]*FakeContainer{
@ -833,9 +833,9 @@ func TestSyncPodsUnhealthy(t *testing.T) {
func TestSyncPodsDoesNothing(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
container := api.Container{Name: "bar"}
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
container := v1.Container{Name: "bar"}
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
container,
},
})
@ -856,11 +856,11 @@ func TestSyncPodsDoesNothing(t *testing.T) {
func TestSyncPodWithRestartPolicy(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
containers := []api.Container{
containers := []v1.Container{
{Name: "succeeded"},
{Name: "failed"},
}
pod := makePod("foo", &api.PodSpec{
pod := makePod("foo", &v1.PodSpec{
Containers: containers,
})
dockerContainers := []*FakeContainer{
@ -886,13 +886,13 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
}}
tests := []struct {
policy api.RestartPolicy
policy v1.RestartPolicy
calls []string
created []string
stopped []string
}{
{
api.RestartPolicyAlways,
v1.RestartPolicyAlways,
[]string{
// Restart both containers.
"create", "start", "inspect_container", "create", "start", "inspect_container",
@ -901,7 +901,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
[]string{},
},
{
api.RestartPolicyOnFailure,
v1.RestartPolicyOnFailure,
[]string{
// Restart the failed container.
"create", "start", "inspect_container",
@ -910,7 +910,7 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
[]string{},
},
{
api.RestartPolicyNever,
v1.RestartPolicyNever,
[]string{
// Check the pod infra container.
"inspect_container", "inspect_container",
@ -943,11 +943,11 @@ func TestSyncPodBackoff(t *testing.T) {
startTime := fakeClock.Now()
dm, fakeDocker := newTestDockerManager()
containers := []api.Container{
containers := []v1.Container{
{Name: "good"},
{Name: "bad"},
}
pod := makePod("podfoo", &api.PodSpec{
pod := makePod("podfoo", &v1.PodSpec{
Containers: containers,
})
@ -1027,14 +1027,14 @@ func TestSyncPodBackoff(t *testing.T) {
func TestGetRestartCount(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
containerName := "bar"
pod := *makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := *makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: containerName},
},
RestartPolicy: "Always",
})
pod.Status = api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
pod.Status = v1.PodStatus{
ContainerStatuses: []v1.ContainerStatus{
{
Name: containerName,
RestartCount: 3,
@ -1043,7 +1043,7 @@ func TestGetRestartCount(t *testing.T) {
}
// Helper function for verifying the restart count.
verifyRestartCount := func(pod *api.Pod, expectedCount int) {
verifyRestartCount := func(pod *v1.Pod, expectedCount int) {
runSyncPod(t, dm, fakeDocker, pod, nil, false)
status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
if err != nil {
@ -1059,7 +1059,7 @@ func TestGetRestartCount(t *testing.T) {
}
}
killOneContainer := func(pod *api.Pod) {
killOneContainer := func(pod *v1.Pod) {
status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
if err != nil {
t.Fatalf("unexpected error %v", err)
@ -1108,13 +1108,13 @@ func TestGetRestartCount(t *testing.T) {
func TestGetTerminationMessagePath(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
containers := []api.Container{
containers := []v1.Container{
{
Name: "bar",
TerminationMessagePath: "/dev/somepath",
},
}
pod := makePod("foo", &api.PodSpec{
pod := makePod("foo", &v1.PodSpec{
Containers: containers,
})
@ -1140,13 +1140,13 @@ func TestSyncPodWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
fakeHTTPClient := &fakeHTTP{}
dm, fakeDocker := newTestDockerManagerWithHTTPClient(fakeHTTPClient)
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{
Name: "bar",
Lifecycle: &api.Lifecycle{
PostStart: &api.Handler{
HTTPGet: &api.HTTPGetAction{
Lifecycle: &v1.Lifecycle{
PostStart: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Host: "foo",
Port: intstr.FromInt(8080),
Path: "bar",
@ -1183,12 +1183,12 @@ func TestSyncPodEventHandlerFails(t *testing.T) {
fakeHTTPClient := &fakeHTTP{err: fmt.Errorf("test error")}
dm, fakeDocker := newTestDockerManagerWithHTTPClient(fakeHTTPClient)
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar",
Lifecycle: &api.Lifecycle{
PostStart: &api.Handler{
HTTPGet: &api.HTTPGetAction{
Lifecycle: &v1.Lifecycle{
PostStart: &v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Host: "does.no.exist",
Port: intstr.FromInt(8080),
Path: "bar",
@ -1256,12 +1256,12 @@ func TestPortForwardNoSuchContainer(t *testing.T) {
func TestSyncPodWithTerminationLog(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
container := api.Container{
container := v1.Container{
Name: "bar",
TerminationMessagePath: "/dev/somepath",
}
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
container,
},
})
@ -1298,13 +1298,12 @@ func TestSyncPodWithTerminationLog(t *testing.T) {
func TestSyncPodWithHostNetwork(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
})
runSyncPod(t, dm, fakeDocker, pod, nil, false)
@ -1342,20 +1341,20 @@ func TestVerifyNonRoot(t *testing.T) {
var nonRootUid int64 = 1
tests := map[string]struct {
container *api.Container
container *v1.Container
inspectImage *dockertypes.ImageInspect
expectedError string
}{
// success cases
"non-root runAsUser": {
container: &api.Container{
SecurityContext: &api.SecurityContext{
container: &v1.Container{
SecurityContext: &v1.SecurityContext{
RunAsUser: &nonRootUid,
},
},
},
"numeric non-root image user": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: &dockertypes.ImageInspect{
Config: &dockercontainer.Config{
User: "1",
@ -1363,7 +1362,7 @@ func TestVerifyNonRoot(t *testing.T) {
},
},
"numeric non-root image user with gid": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: &dockertypes.ImageInspect{
Config: &dockercontainer.Config{
User: "1:2",
@ -1373,15 +1372,15 @@ func TestVerifyNonRoot(t *testing.T) {
// failure cases
"root runAsUser": {
container: &api.Container{
SecurityContext: &api.SecurityContext{
container: &v1.Container{
SecurityContext: &v1.SecurityContext{
RunAsUser: &rootUid,
},
},
expectedError: "container's runAsUser breaks non-root policy",
},
"non-numeric image user": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: &dockertypes.ImageInspect{
Config: &dockercontainer.Config{
User: "foo",
@ -1390,7 +1389,7 @@ func TestVerifyNonRoot(t *testing.T) {
expectedError: "non-numeric user",
},
"numeric root image user": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: &dockertypes.ImageInspect{
Config: &dockercontainer.Config{
User: "0",
@ -1399,7 +1398,7 @@ func TestVerifyNonRoot(t *testing.T) {
expectedError: "container has no runAsUser and image will run as root",
},
"numeric root image user with gid": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: &dockertypes.ImageInspect{
Config: &dockercontainer.Config{
User: "0:1",
@ -1408,12 +1407,12 @@ func TestVerifyNonRoot(t *testing.T) {
expectedError: "container has no runAsUser and image will run as root",
},
"nil image in inspect": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: nil,
expectedError: "unable to inspect image",
},
"nil config in image inspect": {
container: &api.Container{},
container: &v1.Container{},
inspectImage: &dockertypes.ImageInspect{},
expectedError: "unable to inspect image",
},
@ -1471,7 +1470,7 @@ func TestGetUserFromImageUser(t *testing.T) {
func TestGetPidMode(t *testing.T) {
// test false
pod := &api.Pod{}
pod := &v1.Pod{}
pidMode := getPidMode(pod)
if pidMode != "" {
@ -1479,8 +1478,8 @@ func TestGetPidMode(t *testing.T) {
}
// test true
pod.Spec.SecurityContext = &api.PodSecurityContext{}
pod.Spec.SecurityContext.HostPID = true
pod.Spec.SecurityContext = &v1.PodSecurityContext{}
pod.Spec.HostPID = true
pidMode = getPidMode(pod)
if pidMode != "host" {
t.Errorf("expected host pid mode for pod but got %v", pidMode)
@ -1489,7 +1488,7 @@ func TestGetPidMode(t *testing.T) {
func TestGetIPCMode(t *testing.T) {
// test false
pod := &api.Pod{}
pod := &v1.Pod{}
ipcMode := getIPCMode(pod)
if ipcMode != "" {
@ -1497,8 +1496,8 @@ func TestGetIPCMode(t *testing.T) {
}
// test true
pod.Spec.SecurityContext = &api.PodSecurityContext{}
pod.Spec.SecurityContext.HostIPC = true
pod.Spec.SecurityContext = &v1.PodSecurityContext{}
pod.Spec.HostIPC = true
ipcMode = getIPCMode(pod)
if ipcMode != "host" {
t.Errorf("expected host ipc mode for pod but got %v", ipcMode)
@ -1511,13 +1510,13 @@ func TestSyncPodWithPullPolicy(t *testing.T) {
puller.HasImages = []string{"foo/existing_one:v1", "foo/want:latest"}
dm.podInfraContainerImage = "foo/infra_image:v1"
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
{Name: "bar", Image: "foo/pull_always_image:v1", ImagePullPolicy: api.PullAlways},
{Name: "bar2", Image: "foo/pull_if_not_present_image:v1", ImagePullPolicy: api.PullIfNotPresent},
{Name: "bar3", Image: "foo/existing_one:v1", ImagePullPolicy: api.PullIfNotPresent},
{Name: "bar4", Image: "foo/want:latest", ImagePullPolicy: api.PullIfNotPresent},
{Name: "bar5", Image: "foo/pull_never_image:v1", ImagePullPolicy: api.PullNever},
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar", Image: "foo/pull_always_image:v1", ImagePullPolicy: v1.PullAlways},
{Name: "bar2", Image: "foo/pull_if_not_present_image:v1", ImagePullPolicy: v1.PullIfNotPresent},
{Name: "bar3", Image: "foo/existing_one:v1", ImagePullPolicy: v1.PullIfNotPresent},
{Name: "bar4", Image: "foo/want:latest", ImagePullPolicy: v1.PullIfNotPresent},
{Name: "bar5", Image: "foo/pull_never_image:v1", ImagePullPolicy: v1.PullNever},
},
})
@ -1555,25 +1554,25 @@ func TestSyncPodWithPullPolicy(t *testing.T) {
func TestSyncPodWithFailure(t *testing.T) {
pod := makePod("foo", nil)
tests := map[string]struct {
container api.Container
container v1.Container
dockerError map[string]error
pullerError []error
expected []*kubecontainer.SyncResult
}{
"PullImageFailure": {
api.Container{Name: "bar", Image: "foo/real_image:v1", ImagePullPolicy: api.PullAlways},
v1.Container{Name: "bar", Image: "foo/real_image:v1", ImagePullPolicy: v1.PullAlways},
map[string]error{},
[]error{fmt.Errorf("can't pull image")},
[]*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", images.ErrImagePull, "can't pull image"}},
},
"CreateContainerFailure": {
api.Container{Name: "bar", Image: "foo/already_present:v2"},
v1.Container{Name: "bar", Image: "foo/already_present:v2"},
map[string]error{"create": fmt.Errorf("can't create container")},
[]error{},
[]*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrRunContainer, "can't create container"}},
},
"StartContainerFailure": {
api.Container{Name: "bar", Image: "foo/already_present:v2"},
v1.Container{Name: "bar", Image: "foo/already_present:v2"},
map[string]error{"start": fmt.Errorf("can't start container")},
[]error{},
[]*kubecontainer.SyncResult{{kubecontainer.StartContainer, "bar", kubecontainer.ErrRunContainer, "can't start container"}},
@ -1592,7 +1591,7 @@ func TestSyncPodWithFailure(t *testing.T) {
}})
fakeDocker.InjectErrors(test.dockerError)
puller.ErrorsToInject = test.pullerError
pod.Spec.Containers = []api.Container{test.container}
pod.Spec.Containers = []v1.Container{test.container}
result := runSyncPod(t, dm, fakeDocker, pod, nil, true)
verifySyncResults(t, test.expected, result)
}
@ -1658,9 +1657,9 @@ func TestSecurityOptsOperator(t *testing.T) {
func TestGetSecurityOpts(t *testing.T) {
const containerName = "bar"
pod := func(annotations map[string]string) *api.Pod {
p := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := func(annotations map[string]string) *v1.Pod {
p := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: containerName},
},
})
@ -1670,7 +1669,7 @@ func TestGetSecurityOpts(t *testing.T) {
tests := []struct {
msg string
pod *api.Pod
pod *v1.Pod
expectedOpts []string
}{{
msg: "No security annotations",
@ -1679,7 +1678,7 @@ func TestGetSecurityOpts(t *testing.T) {
}, {
msg: "Seccomp default",
pod: pod(map[string]string{
api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
}),
expectedOpts: nil,
}, {
@ -1697,7 +1696,7 @@ func TestGetSecurityOpts(t *testing.T) {
}, {
msg: "AppArmor and seccomp profile",
pod: pod(map[string]string{
api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
}),
expectedOpts: []string{"apparmor=foo"},
@ -1722,8 +1721,8 @@ func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
recorder := record.NewFakeRecorder(20)
dm.recorder = recorder
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
@ -1752,19 +1751,19 @@ func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions >= 1.10 must not have seccomp disabled by default")
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
assert.NoError(t, expectEvent(recorder, api.EventTypeNormal, events.CreatedContainer,
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined]", cid)))
}
func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
pod := makePod("foo4", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo4", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar4"},
},
})
pod.Annotations = map[string]string{
api.SeccompPodAnnotationKey: "unconfined",
v1.SeccompPodAnnotationKey: "unconfined",
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
@ -1793,13 +1792,13 @@ func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) {
func TestDefaultSeccompProfileWithDockerV110(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
pod := makePod("foo1", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo1", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar1"},
},
})
pod.Annotations = map[string]string{
api.SeccompPodAnnotationKey: "docker/default",
v1.SeccompPodAnnotationKey: "docker/default",
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
@ -1828,14 +1827,14 @@ func TestDefaultSeccompProfileWithDockerV110(t *testing.T) {
func TestSeccompContainerAnnotationTrumpsPod(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
pod := makePod("foo2", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo2", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar2"},
},
})
pod.Annotations = map[string]string{
api.SeccompPodAnnotationKey: "unconfined",
api.SeccompContainerAnnotationKeyPrefix + "bar2": "docker/default",
v1.SeccompPodAnnotationKey: "unconfined",
v1.SeccompContainerAnnotationKeyPrefix + "bar2": "docker/default",
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
@ -1871,21 +1870,21 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) {
}{
{
annotations: map[string]string{
api.SeccompPodAnnotationKey: "localhost/test",
v1.SeccompPodAnnotationKey: "localhost/test",
},
expectedSecOpt: `seccomp={"foo":"bar"}`,
expectedSecMsg: "seccomp=test(md5:21aeae45053385adebd25311f9dd9cb1)",
},
{
annotations: map[string]string{
api.SeccompPodAnnotationKey: "localhost/sub/subtest",
v1.SeccompPodAnnotationKey: "localhost/sub/subtest",
},
expectedSecOpt: `seccomp={"abc":"def"}`,
expectedSecMsg: "seccomp=sub/subtest(md5:07c9bcb4db631f7ca191d6e0bca49f76)",
},
{
annotations: map[string]string{
api.SeccompPodAnnotationKey: "localhost/not-existing",
v1.SeccompPodAnnotationKey: "localhost/not-existing",
},
expectedError: "cannot load seccomp profile",
},
@ -1900,8 +1899,8 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) {
_, filename, _, _ := goruntime.Caller(0)
dm.seccompProfileRoot = path.Join(path.Dir(filename), "fixtures", "seccomp")
pod := makePod("foo2", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo2", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar2"},
},
})
@ -1935,7 +1934,7 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) {
assert.Contains(t, newContainer.HostConfig.SecurityOpt, test.expectedSecOpt, "The compacted seccomp json profile should be loaded.")
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
assert.NoError(t, expectEvent(recorder, api.EventTypeNormal, events.CreatedContainer,
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
fmt.Sprintf("Created container with docker id %s; Security:[%s]", cid, test.expectedSecMsg)),
"testcase %d", i)
}
@ -1943,8 +1942,8 @@ func TestSeccompLocalhostProfileIsLoaded(t *testing.T) {
func TestSecurityOptsAreNilWithDockerV19(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.9.1", "1.21")
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
@ -2011,8 +2010,8 @@ func TestCreateAppArmorContanier(t *testing.T) {
recorder := record.NewFakeRecorder(20)
dm.recorder = recorder
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
@ -2020,8 +2019,8 @@ func TestCreateAppArmorContanier(t *testing.T) {
apparmor.ContainerAnnotationKeyPrefix + "test": apparmor.ProfileNamePrefix + "test-profile",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "test"},
},
},
@ -2053,7 +2052,7 @@ func TestCreateAppArmorContanier(t *testing.T) {
assert.Contains(t, securityOpts, "apparmor=test-profile", "Container should have apparmor security opt")
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
assert.NoError(t, expectEvent(recorder, api.EventTypeNormal, events.CreatedContainer,
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined apparmor=test-profile]", cid)))
}
@ -2154,8 +2153,8 @@ func TestGetPodStatusNoSuchContainer(t *testing.T) {
infraContainerID = "9876"
)
dm, fakeDocker := newTestDockerManager()
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{{Name: "nosuchcontainer"}},
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{{Name: "nosuchcontainer"}},
})
fakeDocker.SetFakeContainers([]*FakeContainer{
@ -2191,8 +2190,8 @@ func TestGetPodStatusNoSuchContainer(t *testing.T) {
func TestPruneInitContainers(t *testing.T) {
dm, fake := newTestDockerManager()
pod := makePod("", &api.PodSpec{
InitContainers: []api.Container{
pod := makePod("", &v1.PodSpec{
InitContainers: []v1.Container{
{Name: "init1"},
{Name: "init2"},
},
@ -2223,7 +2222,7 @@ func TestPruneInitContainers(t *testing.T) {
func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
cases := []struct {
pod *api.Pod
pod *v1.Pod
fakePodIP string
containerID string
infraContainerID string
@ -2232,14 +2231,14 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
expectUnknown bool
}{
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{{Name: "container"}},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container"}},
},
},
fakePodIP: "10.10.10.10",
@ -2250,14 +2249,14 @@ func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
expectUnknown: false,
},
{
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{{Name: "container"}},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container"}},
},
},
fakePodIP: "",
@ -2335,8 +2334,8 @@ func TestSyncPodGetsPodIPFromNetworkPlugin(t *testing.T) {
fnp := mock_network.NewMockNetworkPlugin(ctrl)
dm.networkPlugin = fnp
pod := makePod("foo", &api.PodSpec{
Containers: []api.Container{
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
@ -2387,12 +2386,12 @@ func TestContainerAndPodFromLabels(t *testing.T) {
}
}
func makePod(name string, spec *api.PodSpec) *api.Pod {
func makePod(name string, spec *v1.PodSpec) *v1.Pod {
if spec == nil {
spec = &api.PodSpec{Containers: []api.Container{{Name: "foo"}, {Name: "bar"}}}
spec = &v1.PodSpec{Containers: []v1.Container{{Name: "foo"}, {Name: "bar"}}}
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: name,
Namespace: "new",

@ -19,7 +19,7 @@ limitations under the License.
package dockertools
import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
dockertypes "github.com/docker/engine-api/types"
)
@ -37,6 +37,6 @@ func containerProvidesPodIP(name *KubeletContainerName) bool {
}
// Returns nil as both Seccomp and AppArmor security options are not valid on Windows
func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
return nil, nil
}
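
Many hunks in this diff likewise swap the event constants (api.EventTypeNormal, api.EventTypeWarning) for their v1 counterparts in recorder calls. A self-contained sketch of that call shape, using the record.NewFakeRecorder helper the tests above already rely on (the pod metadata values are placeholders):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

func main() {
	// FakeRecorder buffers formatted event strings on its Events channel.
	recorder := record.NewFakeRecorder(10)
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "new", UID: "12345678"}}
	// Same call shape as dm.recorder.Eventf in the hunks above, with the v1 constant.
	recorder.Eventf(pod, v1.EventTypeNormal, "InfraChanged", "Pod infrastructure changed, it will be killed and re-created.")
	fmt.Println(<-recorder.Events)
}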

@ -21,7 +21,7 @@ package dockertools
import (
"os"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
dockertypes "github.com/docker/engine-api/types"
)
@ -53,6 +53,6 @@ func containerProvidesPodIP(name *KubeletContainerName) bool {
}
// Returns nil as both Seccomp and AppArmor security options are not valid on Windows
func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
return nil, nil
}

@ -33,7 +33,7 @@ import (
dockernat "github.com/docker/go-connections/nat"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
@ -118,7 +118,7 @@ func TestGetContainerID(t *testing.T) {
}
func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName string) {
container := &api.Container{Name: containerName}
container := &v1.Container{Name: containerName}
hasher := adler32.New()
hashutil.DeepHashObject(hasher, *container)
computedHash := uint64(hasher.Sum32())
@ -142,7 +142,7 @@ func TestContainerNaming(t *testing.T) {
// No Container name
verifyPackUnpack(t, "other", podUID, "name", "")
container := &api.Container{Name: "container"}
container := &v1.Container{Name: "container"}
podName := "foo"
podNamespace := "test"
name := fmt.Sprintf("k8s_%s_%s_%s_%s_42", container.Name, podName, podNamespace, podUID)
@ -416,7 +416,7 @@ func TestPullWithNoSecrets(t *testing.T) {
keyring: fakeKeyring,
}
err := dp.Pull(test.imageName, []api.Secret{})
err := dp.Pull(test.imageName, []v1.Secret{})
if err != nil {
t.Errorf("unexpected non-nil err: %s", err)
continue
@ -459,7 +459,7 @@ func TestPullWithJSONError(t *testing.T) {
client: fakeClient,
keyring: fakeKeyring,
}
err := puller.Pull(test.imageName, []api.Secret{})
err := puller.Pull(test.imageName, []v1.Secret{})
if err == nil || !strings.Contains(err.Error(), test.expectedError) {
t.Errorf("%s: expect error %s, got : %s", i, test.expectedError, err)
continue
@ -483,19 +483,19 @@ func TestPullWithSecrets(t *testing.T) {
tests := map[string]struct {
imageName string
passedSecrets []api.Secret
passedSecrets []v1.Secret
builtInDockerConfig credentialprovider.DockerConfig
expectedPulls []string
}{
"no matching secrets": {
"ubuntu",
[]api.Secret{},
[]v1.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{}),
[]string{"ubuntu using {}"},
},
"default keyring secrets": {
"ubuntu",
[]api.Secret{},
[]v1.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
@ -503,7 +503,7 @@ func TestPullWithSecrets(t *testing.T) {
},
"default keyring secrets unused": {
"ubuntu",
[]api.Secret{},
[]v1.Secret{},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"extraneous": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
@ -511,7 +511,7 @@ func TestPullWithSecrets(t *testing.T) {
},
"builtin keyring secrets, but use passed": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockercfg, Data: map[string][]byte{api.DockerConfigKey: dockercfgContent}}},
[]v1.Secret{{Type: v1.SecretTypeDockercfg, Data: map[string][]byte{v1.DockerConfigKey: dockercfgContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
@ -519,7 +519,7 @@ func TestPullWithSecrets(t *testing.T) {
},
"builtin keyring secrets, but use passed with new docker config": {
"ubuntu",
[]api.Secret{{Type: api.SecretTypeDockerConfigJson, Data: map[string][]byte{api.DockerConfigJsonKey: dockerConfigJsonContent}}},
[]v1.Secret{{Type: v1.SecretTypeDockerConfigJson, Data: map[string][]byte{v1.DockerConfigJsonKey: dockerConfigJsonContent}}},
credentialprovider.DockerConfig(map[string]credentialprovider.DockerConfigEntry{
"index.docker.io/v1/": {Username: "built-in", Password: "password", Email: "email", Provider: nil},
}),
@ -564,7 +564,7 @@ func TestDockerKeyringLookupFails(t *testing.T) {
keyring: fakeKeyring,
}
err := dp.Pull("host/repository/image:version", []api.Secret{})
err := dp.Pull("host/repository/image:version", []v1.Secret{})
if err == nil {
t.Errorf("unexpected non-error")
}
@ -908,7 +908,7 @@ func TestFindContainersByPod(t *testing.T) {
}
func TestMakePortsAndBindings(t *testing.T) {
portMapping := func(container, host int, protocol api.Protocol, ip string) kubecontainer.PortMapping {
portMapping := func(container, host int, protocol v1.Protocol, ip string) kubecontainer.PortMapping {
return kubecontainer.PortMapping{
ContainerPort: container,
HostPort: host,

View File

@ -30,7 +30,7 @@ import (
dockercontainer "github.com/docker/engine-api/types/container"
"k8s.io/kubernetes/pkg/util/clock"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
type calledDetail struct {
@ -580,7 +580,7 @@ type FakeDockerPuller struct {
}
// Pull records the image pull attempt, and optionally injects an error.
func (f *FakeDockerPuller) Pull(image string, secrets []api.Secret) (err error) {
func (f *FakeDockerPuller) Pull(image string, secrets []v1.Secret) (err error) {
f.Lock()
defer f.Unlock()
f.ImagesPulled = append(f.ImagesPulled, image)

View File

@ -18,7 +18,7 @@ package dockertools
import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
@ -65,14 +65,14 @@ func NewFakeDockerManager(
}
type fakePodGetter struct {
pods map[types.UID]*api.Pod
pods map[types.UID]*v1.Pod
}
func newFakePodGetter() *fakePodGetter {
return &fakePodGetter{make(map[types.UID]*api.Pod)}
return &fakePodGetter{make(map[types.UID]*v1.Pod)}
}
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) {
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
pod, found := f.pods[uid]
return pod, found
}

View File

@ -161,7 +161,7 @@ func (d *kubeDockerClient) StartContainer(id string) error {
return err
}
// Stopping an already stopped container will not cause an error in engine-api.
func (d *kubeDockerClient) StopContainer(id string, timeout int) error {
ctx, cancel := d.getCustomTimeoutContext(time.Duration(timeout) * time.Second)
defer cancel()

View File

@ -22,6 +22,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/custommetrics"
"k8s.io/kubernetes/pkg/kubelet/types"
@ -62,11 +63,11 @@ type labelledContainerInfo struct {
Hash string
RestartCount int
TerminationMessagePath string
PreStopHandler *api.Handler
Ports []api.ContainerPort
PreStopHandler *v1.Handler
Ports []v1.ContainerPort
}
func newLabels(container *api.Container, pod *api.Pod, restartCount int, enableCustomMetrics bool) map[string]string {
func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCustomMetrics bool) map[string]string {
labels := map[string]string{}
labels[types.KubernetesPodNameLabel] = pod.Name
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
@ -128,13 +129,13 @@ func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
}
preStopHandler := &api.Handler{}
preStopHandler := &v1.Handler{}
if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
} else if found {
containerInfo.PreStopHandler = preStopHandler
}
containerPorts := []api.ContainerPort{}
containerPorts := []v1.ContainerPort{}
if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPortsLabel, &containerPorts); err != nil {
logError(containerInfo, kubernetesContainerPortsLabel, err)
} else if found {
@ -192,7 +193,7 @@ func getJsonObjectFromLabel(labels map[string]string, label string, value interf
return false, nil
}
// The label kubernetesPodLabel is added a long time ago (#7421), it serialized the whole api.Pod to a docker label.
// The label kubernetesPodLabel was added a long time ago (#7421); it serialized the whole v1.Pod into a docker label.
// We want to remove this label because it serialized too much useless information. However, the kubelet may still need
// to work with old containers that only have this label for a long time, until we completely deprecate the old label.
// Before that, to ensure correctness, we have to supply information with the old labels when newly added labels
@ -200,15 +201,15 @@ func getJsonObjectFromLabel(labels map[string]string, label string, value interf
// TODO(random-liu): Remove this function when we can completely remove label kubernetesPodLabel, probably after
// dropping support for v1.1.
func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *labelledContainerInfo) {
// Get api.Pod from old label
var pod *api.Pod
// Get v1.Pod from old label
var pod *v1.Pod
data, found := labels[kubernetesPodLabel]
if !found {
// Don't report any error here, because it's normal that a container has no pod label, especially
// as we gradually deprecate the old label
return
}
pod = &api.Pod{}
pod = &v1.Pod{}
if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(data), pod); err != nil {
// If the pod label can't be parsed, we should report an error
logError(containerInfo, kubernetesPodLabel, err)
@ -221,8 +222,8 @@ func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *la
containerInfo.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
}
// Get api.Container from api.Pod
var container *api.Container
// Get v1.Container from v1.Pod
var container *v1.Container
for i := range pod.Spec.Containers {
if pod.Spec.Containers[i].Name == containerInfo.Name {
container = &pod.Spec.Containers[i]

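The pre-stop handler and container ports only survive kubelet restarts because newLabels serializes them into docker labels and getContainerInfoFromLabel decodes them again. The round-trip is plain JSON in a string map; the standalone sketch below shows that pattern with an illustrative label key and payload type (not the kubelet's own helpers):

package main

import (
	"encoding/json"
	"fmt"
)

// illustrativePort stands in for the v1.ContainerPort slice stored under
// kubernetesContainerPortsLabel; the real code round-trips API types.
type illustrativePort struct {
	Name          string `json:"name"`
	ContainerPort int    `json:"containerPort"`
}

func main() {
	labels := map[string]string{}

	// Write side: marshal the object and stash it in a label, as newLabels does.
	ports := []illustrativePort{{Name: "http", ContainerPort: 8080}}
	data, err := json.Marshal(ports)
	if err != nil {
		panic(err)
	}
	labels["example.io/container-ports"] = string(data)

	// Read side: look the label up and unmarshal it back, as getJsonObjectFromLabel does.
	decoded := []illustrativePort{}
	if raw, found := labels["example.io/container-ports"]; found {
		if err := json.Unmarshal([]byte(raw), &decoded); err != nil {
			panic(err)
		}
	}
	fmt.Printf("%+v\n", decoded) // [{Name:http ContainerPort:8080}]
}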
View File

@ -21,8 +21,8 @@ import (
"strconv"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/runtime"
@ -33,52 +33,52 @@ func TestLabels(t *testing.T) {
restartCount := 5
deletionGracePeriod := int64(10)
terminationGracePeriod := int64(10)
lifecycle := &api.Lifecycle{
lifecycle := &v1.Lifecycle{
// Left PostStart as nil
PreStop: &api.Handler{
Exec: &api.ExecAction{
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"action1", "action2"},
},
HTTPGet: &api.HTTPGetAction{
HTTPGet: &v1.HTTPGetAction{
Path: "path",
Host: "host",
Port: intstr.FromInt(8080),
Scheme: "scheme",
},
TCPSocket: &api.TCPSocketAction{
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromString("80"),
},
},
}
containerPorts := []api.ContainerPort{
containerPorts := []v1.ContainerPort{
{
Name: "http",
HostPort: 80,
ContainerPort: 8080,
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
{
Name: "https",
HostPort: 443,
ContainerPort: 6443,
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
},
}
container := &api.Container{
container := &v1.Container{
Name: "test_container",
Ports: containerPorts,
TerminationMessagePath: "/somepath",
Lifecycle: lifecycle,
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
DeletionGracePeriodSeconds: &deletionGracePeriod,
},
Spec: api.PodSpec{
Containers: []api.Container{*container},
Spec: v1.PodSpec{
Containers: []v1.Container{*container},
TerminationGracePeriodSeconds: &terminationGracePeriod,
},
}

View File

@ -21,36 +21,36 @@ import (
"strconv"
"strings"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
// FromServices builds the environment variables that a container is started with,
// telling it where to find the services it may need. The services are
// provided as an argument.
func FromServices(services []*api.Service) []api.EnvVar {
var result []api.EnvVar
func FromServices(services []*v1.Service) []v1.EnvVar {
var result []v1.EnvVar
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
// the services passed to this method should be pre-filtered
// only services that have the cluster IP set should be included here
if !api.IsServiceIPSet(service) {
if !v1.IsServiceIPSet(service) {
continue
}
// Host
name := makeEnvVariableName(service.Name) + "_SERVICE_HOST"
result = append(result, api.EnvVar{Name: name, Value: service.Spec.ClusterIP})
result = append(result, v1.EnvVar{Name: name, Value: service.Spec.ClusterIP})
// First port - give it the backwards-compatible name
name = makeEnvVariableName(service.Name) + "_SERVICE_PORT"
result = append(result, api.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))})
result = append(result, v1.EnvVar{Name: name, Value: strconv.Itoa(int(service.Spec.Ports[0].Port))})
// All named ports (only the first may be unnamed, checked in validation)
for i := range service.Spec.Ports {
sp := &service.Spec.Ports[i]
if sp.Name != "" {
pn := name + "_" + makeEnvVariableName(sp.Name)
result = append(result, api.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))})
result = append(result, v1.EnvVar{Name: pn, Value: strconv.Itoa(int(sp.Port))})
}
}
// Docker-compatible vars.
@ -67,25 +67,25 @@ func makeEnvVariableName(str string) string {
return strings.ToUpper(strings.Replace(str, "-", "_", -1))
}
func makeLinkVariables(service *api.Service) []api.EnvVar {
func makeLinkVariables(service *v1.Service) []v1.EnvVar {
prefix := makeEnvVariableName(service.Name)
all := []api.EnvVar{}
all := []v1.EnvVar{}
for i := range service.Spec.Ports {
sp := &service.Spec.Ports[i]
protocol := string(api.ProtocolTCP)
protocol := string(v1.ProtocolTCP)
if sp.Protocol != "" {
protocol = string(sp.Protocol)
}
if i == 0 {
// Docker special-cases the first port.
all = append(all, api.EnvVar{
all = append(all, v1.EnvVar{
Name: prefix + "_PORT",
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),
})
}
portPrefix := fmt.Sprintf("%s_PORT_%d_%s", prefix, sp.Port, strings.ToUpper(protocol))
all = append(all, []api.EnvVar{
all = append(all, []v1.EnvVar{
{
Name: portPrefix,
Value: fmt.Sprintf("%s://%s:%d", strings.ToLower(protocol), service.Spec.ClusterIP, sp.Port),

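The naming convention implemented by FromServices and makeLinkVariables — <SERVICE>_SERVICE_HOST, <SERVICE>_SERVICE_PORT, plus the Docker-style <SERVICE>_PORT_<port>_<PROTO> link variables — can be reproduced in a short standalone sketch. The service values below are made up; the real code reads them from v1.Service objects:

package main

import (
	"fmt"
	"strings"
)

// envName mirrors makeEnvVariableName: upper-case and replace dashes.
func envName(s string) string {
	return strings.ToUpper(strings.Replace(s, "-", "_", -1))
}

func main() {
	// Illustrative values; the kubelet takes these from the Service spec.
	svcName, clusterIP, port, proto := "redis-master", "10.0.0.11", 6379, "tcp"

	prefix := envName(svcName)
	vars := map[string]string{
		prefix + "_SERVICE_HOST": clusterIP,
		prefix + "_SERVICE_PORT": fmt.Sprintf("%d", port),
		prefix + "_PORT":         fmt.Sprintf("%s://%s:%d", proto, clusterIP, port),
		fmt.Sprintf("%s_PORT_%d_%s", prefix, port, strings.ToUpper(proto)): fmt.Sprintf("%s://%s:%d", proto, clusterIP, port),
	}
	for name, value := range vars {
		fmt.Println(name + "=" + value) // e.g. REDIS_MASTER_SERVICE_HOST=10.0.0.11
	}
}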
View File

@ -20,67 +20,67 @@ import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/envvars"
)
func TestFromServices(t *testing.T) {
sl := []*api.Service{
sl := []*v1.Service{
{
ObjectMeta: api.ObjectMeta{Name: "foo-bar"},
Spec: api.ServiceSpec{
ObjectMeta: v1.ObjectMeta{Name: "foo-bar"},
Spec: v1.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "1.2.3.4",
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{Port: 8080, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "abc-123"},
Spec: api.ServiceSpec{
ObjectMeta: v1.ObjectMeta{Name: "abc-123"},
Spec: v1.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "5.6.7.8",
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{Name: "u-d-p", Port: 8081, Protocol: "UDP"},
{Name: "t-c-p", Port: 8081, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "q-u-u-x"},
Spec: api.ServiceSpec{
ObjectMeta: v1.ObjectMeta{Name: "q-u-u-x"},
Spec: v1.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "9.8.7.6",
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{Port: 8082, Protocol: "TCP"},
{Name: "8083", Port: 8083, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-none"},
Spec: api.ServiceSpec{
ObjectMeta: v1.ObjectMeta{Name: "svrc-clusterip-none"},
Spec: v1.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "None",
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{Port: 8082, Protocol: "TCP"},
},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "svrc-clusterip-empty"},
Spec: api.ServiceSpec{
ObjectMeta: v1.ObjectMeta{Name: "svrc-clusterip-empty"},
Spec: v1.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
ClusterIP: "",
Ports: []api.ServicePort{
Ports: []v1.ServicePort{
{Port: 8082, Protocol: "TCP"},
},
},
},
}
vars := envvars.FromServices(sl)
expected := []api.EnvVar{
expected := []v1.EnvVar{
{Name: "FOO_BAR_SERVICE_HOST", Value: "1.2.3.4"},
{Name: "FOO_BAR_SERVICE_PORT", Value: "8080"},
{Name: "FOO_BAR_PORT", Value: "tcp://1.2.3.4:8080"},

View File

@ -23,8 +23,8 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@ -48,11 +48,11 @@ type managerImpl struct {
// protects access to internal state
sync.RWMutex
// node conditions are the set of conditions present
nodeConditions []api.NodeConditionType
nodeConditions []v1.NodeConditionType
// captures when a node condition was last observed based on a threshold being met
nodeConditionsLastObservedAt nodeConditionsObservedAt
// nodeRef is a reference to the node
nodeRef *api.ObjectReference
nodeRef *v1.ObjectReference
// used to record events about the node
recorder record.EventRecorder
// used to measure usage stats on system
@ -62,9 +62,9 @@ type managerImpl struct {
// records the set of thresholds that have been met (including grace period) but not yet resolved
thresholdsMet []Threshold
// resourceToRankFunc maps a resource to a ranking function for that resource.
resourceToRankFunc map[api.ResourceName]rankFunc
resourceToRankFunc map[v1.ResourceName]rankFunc
// resourceToNodeReclaimFuncs maps a resource to an ordered list of functions that know how to reclaim that resource.
resourceToNodeReclaimFuncs map[api.ResourceName]nodeReclaimFuncs
resourceToNodeReclaimFuncs map[v1.ResourceName]nodeReclaimFuncs
// last observations from synchronize
lastObservations signalObservations
// notifiersInitialized indicates if the threshold notifiers have been initialized (i.e. synchronize() has been called once)
@ -81,7 +81,7 @@ func NewManager(
killPodFunc KillPodFunc,
imageGC ImageGC,
recorder record.EventRecorder,
nodeRef *api.ObjectReference,
nodeRef *v1.ObjectReference,
clock clock.Clock) (Manager, lifecycle.PodAdmitHandler, error) {
manager := &managerImpl{
clock: clock,
@ -106,7 +106,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
}
// the node has memory pressure; admit the pod if it is not best-effort
if hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure) {
if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) {
notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod)
if notBestEffort {
return lifecycle.PodAdmitResult{Admit: true}
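The admission rule in this hunk boils down to: while the node reports memory pressure, reject only BestEffort pods. A minimal sketch of that decision, with a plain string QoS class standing in for qos.GetPodQOS and a boolean standing in for the hasNodeCondition check (both stand-ins are illustrative):

package main

import "fmt"

type qosClass string

const (
	bestEffort qosClass = "BestEffort"
	burstable  qosClass = "Burstable"
	guaranteed qosClass = "Guaranteed"
)

// admit mirrors the shape of managerImpl.Admit: under memory pressure,
// everything except a BestEffort pod is still admitted.
func admit(underMemoryPressure bool, class qosClass) bool {
	if !underMemoryPressure {
		return true
	}
	return class != bestEffort
}

func main() {
	for _, class := range []qosClass{bestEffort, burstable, guaranteed} {
		fmt.Printf("memory pressure, %s -> admit=%v\n", class, admit(true, class))
	}
}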
@ -133,14 +133,14 @@ func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePod
func (m *managerImpl) IsUnderMemoryPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure)
return hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure)
}
// IsUnderDiskPressure returns true if the node is under disk pressure.
func (m *managerImpl) IsUnderDiskPressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, api.NodeDiskPressure)
return hasNodeCondition(m.nodeConditions, v1.NodeDiskPressure)
}
func startMemoryThresholdNotifier(thresholds []Threshold, observations signalObservations, hard bool, handler thresholdNotifierHandlerFunc) error {
@ -278,7 +278,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
softEviction := isSoftEvictionThresholds(thresholds, resourceToReclaim)
// record an event about the resources we are now attempting to reclaim via eviction
m.recorder.Eventf(m.nodeRef, api.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
m.recorder.Eventf(m.nodeRef, v1.EventTypeWarning, "EvictionThresholdMet", "Attempting to reclaim %s", resourceToReclaim)
// check if there are node-level resources we can reclaim to reduce pressure before evicting end-user pods.
if m.reclaimNodeLevelResources(resourceToReclaim, observations) {
@ -310,13 +310,13 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
// we kill at most a single pod during each eviction interval
for i := range activePods {
pod := activePods[i]
status := api.PodStatus{
Phase: api.PodFailed,
status := v1.PodStatus{
Phase: v1.PodFailed,
Message: fmt.Sprintf(message, resourceToReclaim),
Reason: reason,
}
// record that we are evicting the pod
m.recorder.Eventf(pod, api.EventTypeWarning, reason, fmt.Sprintf(message, resourceToReclaim))
m.recorder.Eventf(pod, v1.EventTypeWarning, reason, fmt.Sprintf(message, resourceToReclaim))
gracePeriodOverride := int64(0)
if softEviction {
gracePeriodOverride = m.config.MaxPodGracePeriodSeconds
@ -335,7 +335,7 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
}
// reclaimNodeLevelResources attempts to reclaim node-level resources. Returns true if thresholds were satisfied and no pod eviction is required.
func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim api.ResourceName, observations signalObservations) bool {
func (m *managerImpl) reclaimNodeLevelResources(resourceToReclaim v1.ResourceName, observations signalObservations) bool {
nodeReclaimFuncs := m.resourceToNodeReclaimFuncs[resourceToReclaim]
for _, nodeReclaimFunc := range nodeReclaimFuncs {
// attempt to reclaim the pressured resource.

View File

@ -20,8 +20,8 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
@ -31,13 +31,13 @@ import (
// mockPodKiller is used to test which pod is killed
type mockPodKiller struct {
pod *api.Pod
status api.PodStatus
pod *v1.Pod
status v1.PodStatus
gracePeriodOverride *int64
}
// killPodNow records the pod that was killed
func (m *mockPodKiller) killPodNow(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error {
func (m *mockPodKiller) killPodNow(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error {
m.pod = pod
m.status = status
m.gracePeriodOverride = gracePeriodOverride
@ -67,23 +67,23 @@ func (m *mockImageGC) DeleteUnusedImages() (int64, error) {
return m.freed, m.err
}
func makePodWithMemoryStats(name string, requests api.ResourceList, limits api.ResourceList, memoryWorkingSet string) (*api.Pod, statsapi.PodStats) {
pod := newPod(name, []api.Container{
func makePodWithMemoryStats(name string, requests v1.ResourceList, limits v1.ResourceList, memoryWorkingSet string) (*v1.Pod, statsapi.PodStats) {
pod := newPod(name, []v1.Container{
newContainer(name, requests, limits),
}, nil)
podStats := newPodMemoryStats(pod, resource.MustParse(memoryWorkingSet))
return pod, podStats
}
func makePodWithDiskStats(name string, requests api.ResourceList, limits api.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*api.Pod, statsapi.PodStats) {
pod := newPod(name, []api.Container{
func makePodWithDiskStats(name string, requests v1.ResourceList, limits v1.ResourceList, rootFsUsed, logsUsed, perLocalVolumeUsed string) (*v1.Pod, statsapi.PodStats) {
pod := newPod(name, []v1.Container{
newContainer(name, requests, limits),
}, nil)
podStats := newPodDiskStats(pod, parseQuantity(rootFsUsed), parseQuantity(logsUsed), parseQuantity(perLocalVolumeUsed))
return pod, podStats
}
func makeMemoryStats(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
func makeMemoryStats(nodeAvailableBytes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary {
val := resource.MustParse(nodeAvailableBytes)
availableBytes := uint64(val.Value())
WorkingSetBytes := uint64(val.Value())
@ -102,7 +102,7 @@ func makeMemoryStats(nodeAvailableBytes string, podStats map[*api.Pod]statsapi.P
return result
}
func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary {
rootFsVal := resource.MustParse(rootFsAvailableBytes)
rootFsBytes := uint64(rootFsVal.Value())
rootFsCapacityBytes := uint64(rootFsVal.Value() * 2)
@ -132,8 +132,8 @@ func makeDiskStats(rootFsAvailableBytes, imageFsAvailableBytes string, podStats
type podToMake struct {
name string
requests api.ResourceList
limits api.ResourceList
requests v1.ResourceList
limits v1.ResourceList
memoryWorkingSet string
rootFsUsed string
logsFsUsed string
@ -155,15 +155,15 @@ func TestMemoryPressure(t *testing.T) {
{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"},
{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"},
}
pods := []*api.Pod{}
podStats := map[*api.Pod]statsapi.PodStats{}
pods := []*v1.Pod{}
podStats := map[*v1.Pod]statsapi.PodStats{}
for _, podToMake := range podsToMake {
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
pods = append(pods, pod)
podStats[pod] = podStat
}
podToEvict := pods[5]
activePodsFunc := func() []*api.Pod {
activePodsFunc := func() []*v1.Pod {
return pods
}
@ -171,7 +171,7 @@ func TestMemoryPressure(t *testing.T) {
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
imageGC := &mockImageGC{freed: int64(0), err: nil}
nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
config := Config{
MaxPodGracePeriodSeconds: 5,
@ -221,7 +221,7 @@ func TestMemoryPressure(t *testing.T) {
// try to admit our pods (they should succeed)
expected := []bool{true, true}
for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
}
@ -298,7 +298,7 @@ func TestMemoryPressure(t *testing.T) {
// the best-effort pod should not admit, burstable should
expected = []bool{false, true}
for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
}
@ -322,7 +322,7 @@ func TestMemoryPressure(t *testing.T) {
// the best-effort pod should not admit, burstable should
expected = []bool{false, true}
for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
}
@ -346,7 +346,7 @@ func TestMemoryPressure(t *testing.T) {
// all pods should admit now
expected = []bool{true, true}
for i, pod := range []*api.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
}
@ -372,15 +372,15 @@ func TestDiskPressureNodeFs(t *testing.T) {
{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), perLocalVolumeUsed: "300Mi"},
{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"},
}
pods := []*api.Pod{}
podStats := map[*api.Pod]statsapi.PodStats{}
pods := []*v1.Pod{}
podStats := map[*v1.Pod]statsapi.PodStats{}
for _, podToMake := range podsToMake {
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
pods = append(pods, pod)
podStats[pod] = podStat
}
podToEvict := pods[5]
activePodsFunc := func() []*api.Pod {
activePodsFunc := func() []*v1.Pod {
return pods
}
@ -388,7 +388,7 @@ func TestDiskPressureNodeFs(t *testing.T) {
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
imageGC := &mockImageGC{freed: int64(0), err: nil}
nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
config := Config{
MaxPodGracePeriodSeconds: 5,
@ -569,15 +569,15 @@ func TestMinReclaim(t *testing.T) {
{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "300Mi"},
{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), memoryWorkingSet: "500Mi"},
}
pods := []*api.Pod{}
podStats := map[*api.Pod]statsapi.PodStats{}
pods := []*v1.Pod{}
podStats := map[*v1.Pod]statsapi.PodStats{}
for _, podToMake := range podsToMake {
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.memoryWorkingSet)
pods = append(pods, pod)
podStats[pod] = podStat
}
podToEvict := pods[5]
activePodsFunc := func() []*api.Pod {
activePodsFunc := func() []*v1.Pod {
return pods
}
@ -585,7 +585,7 @@ func TestMinReclaim(t *testing.T) {
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
imageGC := &mockImageGC{freed: int64(0), err: nil}
nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
config := Config{
MaxPodGracePeriodSeconds: 5,
@ -707,15 +707,15 @@ func TestNodeReclaimFuncs(t *testing.T) {
{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "300Mi"},
{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsUsed: "500Mi"},
}
pods := []*api.Pod{}
podStats := map[*api.Pod]statsapi.PodStats{}
pods := []*v1.Pod{}
podStats := map[*v1.Pod]statsapi.PodStats{}
for _, podToMake := range podsToMake {
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsUsed, podToMake.logsFsUsed, podToMake.perLocalVolumeUsed)
pods = append(pods, pod)
podStats[pod] = podStat
}
podToEvict := pods[5]
activePodsFunc := func() []*api.Pod {
activePodsFunc := func() []*v1.Pod {
return pods
}
@ -724,7 +724,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
imageGcFree := resource.MustParse("700Mi")
imageGC := &mockImageGC{freed: imageGcFree.Value(), err: nil}
nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
config := Config{
MaxPodGracePeriodSeconds: 5,
@ -866,14 +866,14 @@ func TestNodeReclaimFuncs(t *testing.T) {
}
func TestInodePressureNodeFsInodes(t *testing.T) {
podMaker := func(name string, requests api.ResourceList, limits api.ResourceList, rootInodes, logInodes, volumeInodes string) (*api.Pod, statsapi.PodStats) {
pod := newPod(name, []api.Container{
podMaker := func(name string, requests v1.ResourceList, limits v1.ResourceList, rootInodes, logInodes, volumeInodes string) (*v1.Pod, statsapi.PodStats) {
pod := newPod(name, []v1.Container{
newContainer(name, requests, limits),
}, nil)
podStats := newPodInodeStats(pod, parseQuantity(rootInodes), parseQuantity(logInodes), parseQuantity(volumeInodes))
return pod, podStats
}
summaryStatsMaker := func(rootFsInodesFree, rootFsInodes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
summaryStatsMaker := func(rootFsInodesFree, rootFsInodes string, podStats map[*v1.Pod]statsapi.PodStats) *statsapi.Summary {
rootFsInodesFreeVal := resource.MustParse(rootFsInodesFree)
internalRootFsInodesFree := uint64(rootFsInodesFreeVal.Value())
rootFsInodesVal := resource.MustParse(rootFsInodes)
@ -900,15 +900,15 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
{name: "best-effort-low", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsInodesUsed: "300Mi"},
{name: "best-effort-high", requests: newResourceList("", ""), limits: newResourceList("", ""), rootFsInodesUsed: "800Mi"},
}
pods := []*api.Pod{}
podStats := map[*api.Pod]statsapi.PodStats{}
pods := []*v1.Pod{}
podStats := map[*v1.Pod]statsapi.PodStats{}
for _, podToMake := range podsToMake {
pod, podStat := podMaker(podToMake.name, podToMake.requests, podToMake.limits, podToMake.rootFsInodesUsed, podToMake.logsFsInodesUsed, podToMake.perLocalVolumeInodesUsed)
pods = append(pods, pod)
podStats[pod] = podStat
}
podToEvict := pods[5]
activePodsFunc := func() []*api.Pod {
activePodsFunc := func() []*v1.Pod {
return pods
}
@ -916,7 +916,7 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
podKiller := &mockPodKiller{}
diskInfoProvider := &mockDiskInfoProvider{dedicatedImageFs: false}
imageGC := &mockImageGC{freed: int64(0), err: nil}
nodeRef := &api.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
nodeRef := &v1.ObjectReference{Kind: "Node", Name: "test", UID: types.UID("test"), Namespace: ""}
config := Config{
MaxPodGracePeriodSeconds: 5,

View File

@ -26,6 +26,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
@ -40,45 +41,45 @@ const (
// the message associated with the reason.
message = "The node was low on resource: %v."
// disk, in bytes. internal to this module, used to account for local disk usage.
resourceDisk api.ResourceName = "disk"
resourceDisk v1.ResourceName = "disk"
// inodes, number. internal to this module, used to account for local disk inode consumption.
resourceInodes api.ResourceName = "inodes"
resourceInodes v1.ResourceName = "inodes"
// imagefs, in bytes. internal to this module, used to account for local image filesystem usage.
resourceImageFs api.ResourceName = "imagefs"
resourceImageFs v1.ResourceName = "imagefs"
// imagefs inodes, number. internal to this module, used to account for local image filesystem inodes.
resourceImageFsInodes api.ResourceName = "imagefsInodes"
resourceImageFsInodes v1.ResourceName = "imagefsInodes"
// nodefs, in bytes. internal to this module, used to account for local node root filesystem usage.
resourceNodeFs api.ResourceName = "nodefs"
resourceNodeFs v1.ResourceName = "nodefs"
// nodefs inodes, number. internal to this module, used to account for local node root filesystem inodes.
resourceNodeFsInodes api.ResourceName = "nodefsInodes"
resourceNodeFsInodes v1.ResourceName = "nodefsInodes"
)
var (
// signalToNodeCondition maps a signal to the node condition to report if the threshold is met.
signalToNodeCondition map[Signal]api.NodeConditionType
signalToNodeCondition map[Signal]v1.NodeConditionType
// signalToResource maps a Signal to its associated Resource.
signalToResource map[Signal]api.ResourceName
signalToResource map[Signal]v1.ResourceName
// resourceToSignal maps a Resource to its associated Signal
resourceToSignal map[api.ResourceName]Signal
resourceToSignal map[v1.ResourceName]Signal
)
func init() {
// map eviction signals to node conditions
signalToNodeCondition = map[Signal]api.NodeConditionType{}
signalToNodeCondition[SignalMemoryAvailable] = api.NodeMemoryPressure
signalToNodeCondition[SignalImageFsAvailable] = api.NodeDiskPressure
signalToNodeCondition[SignalNodeFsAvailable] = api.NodeDiskPressure
signalToNodeCondition[SignalImageFsInodesFree] = api.NodeDiskPressure
signalToNodeCondition[SignalNodeFsInodesFree] = api.NodeDiskPressure
signalToNodeCondition = map[Signal]v1.NodeConditionType{}
signalToNodeCondition[SignalMemoryAvailable] = v1.NodeMemoryPressure
signalToNodeCondition[SignalImageFsAvailable] = v1.NodeDiskPressure
signalToNodeCondition[SignalNodeFsAvailable] = v1.NodeDiskPressure
signalToNodeCondition[SignalImageFsInodesFree] = v1.NodeDiskPressure
signalToNodeCondition[SignalNodeFsInodesFree] = v1.NodeDiskPressure
// map signals to resources (and vice-versa)
signalToResource = map[Signal]api.ResourceName{}
signalToResource[SignalMemoryAvailable] = api.ResourceMemory
signalToResource = map[Signal]v1.ResourceName{}
signalToResource[SignalMemoryAvailable] = v1.ResourceMemory
signalToResource[SignalImageFsAvailable] = resourceImageFs
signalToResource[SignalImageFsInodesFree] = resourceImageFsInodes
signalToResource[SignalNodeFsAvailable] = resourceNodeFs
signalToResource[SignalNodeFsInodesFree] = resourceNodeFsInodes
resourceToSignal = map[api.ResourceName]Signal{}
resourceToSignal = map[v1.ResourceName]Signal{}
for key, value := range signalToResource {
resourceToSignal[value] = key
}
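The init() above only wires two forward maps and inverts one of them so the manager can later map a starved resource back to the signal that caused the pressure. A tiny sketch of that inversion and lookup, with plain strings standing in for the Signal and v1.ResourceName types:

package main

import "fmt"

func main() {
	// Forward map, as populated in init(): eviction signal -> starved resource.
	signalToResource := map[string]string{
		"memory.available": "memory",
		"nodefs.available": "nodefs",
	}
	// Inverted map, built the same way init() builds resourceToSignal.
	resourceToSignal := map[string]string{}
	for signal, res := range signalToResource {
		resourceToSignal[res] = signal
	}
	fmt.Println(resourceToSignal["memory"]) // memory.available
}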
@ -337,11 +338,11 @@ func memoryUsage(memStats *statsapi.MemoryStats) *resource.Quantity {
// localVolumeNames returns the set of volumes for the pod that are local
// TODO: summary API should report what volumes consume local storage rather than hard-coding it here.
func localVolumeNames(pod *api.Pod) []string {
func localVolumeNames(pod *v1.Pod) []string {
result := []string{}
for _, volume := range pod.Spec.Volumes {
if volume.HostPath != nil ||
(volume.EmptyDir != nil && volume.EmptyDir.Medium != api.StorageMediumMemory) ||
(volume.EmptyDir != nil && volume.EmptyDir.Medium != v1.StorageMediumMemory) ||
volume.ConfigMap != nil ||
volume.GitRepo != nil {
result = append(result, volume.Name)
@ -351,7 +352,7 @@ func localVolumeNames(pod *api.Pod) []string {
}
// podDiskUsage aggregates pod disk usage and inode consumption for the specified stats to measure.
func podDiskUsage(podStats statsapi.PodStats, pod *api.Pod, statsToMeasure []fsStatsType) (api.ResourceList, error) {
func podDiskUsage(podStats statsapi.PodStats, pod *v1.Pod, statsToMeasure []fsStatsType) (v1.ResourceList, error) {
disk := resource.Quantity{Format: resource.BinarySI}
inodes := resource.Quantity{Format: resource.BinarySI}
for _, container := range podStats.Containers {
@ -376,14 +377,14 @@ func podDiskUsage(podStats statsapi.PodStats, pod *api.Pod, statsToMeasure []fsS
}
}
}
return api.ResourceList{
return v1.ResourceList{
resourceDisk: disk,
resourceInodes: inodes,
}, nil
}
// podMemoryUsage aggregates pod memory usage.
func podMemoryUsage(podStats statsapi.PodStats) (api.ResourceList, error) {
func podMemoryUsage(podStats statsapi.PodStats) (v1.ResourceList, error) {
disk := resource.Quantity{Format: resource.BinarySI}
memory := resource.Quantity{Format: resource.BinarySI}
for _, container := range podStats.Containers {
@ -394,8 +395,8 @@ func podMemoryUsage(podStats statsapi.PodStats) (api.ResourceList, error) {
// memory usage (if known)
memory.Add(*memoryUsage(container.Memory))
}
return api.ResourceList{
api.ResourceMemory: memory,
return v1.ResourceList{
v1.ResourceMemory: memory,
resourceDisk: disk,
}, nil
}
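podDiskUsage and podMemoryUsage simply fold per-container stats into one pod-level total for the selected filesystems. The shape of that aggregation, sketched with plain byte counts standing in for resource.Quantity and the stats API types (all names here are illustrative):

package main

import "fmt"

// containerStats is an illustrative stand-in for statsapi.ContainerStats.
type containerStats struct {
	rootFsUsedBytes uint64
	logsUsedBytes   uint64
}

// podDiskUsage mirrors the aggregation in helpers.go: sum the selected
// filesystem stats across containers (local volumes omitted for brevity).
func podDiskUsage(containers []containerStats, measureRoot, measureLogs bool) uint64 {
	var total uint64
	for _, c := range containers {
		if measureRoot {
			total += c.rootFsUsedBytes
		}
		if measureLogs {
			total += c.logsUsedBytes
		}
	}
	return total
}

func main() {
	containers := []containerStats{
		{rootFsUsedBytes: 100 << 20, logsUsedBytes: 10 << 20},
		{rootFsUsedBytes: 50 << 20, logsUsedBytes: 5 << 20},
	}
	fmt.Printf("%d MiB\n", podDiskUsage(containers, true, true)>>20) // 165 MiB
}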
@ -419,7 +420,7 @@ func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc {
for i := range podStats {
uid2PodStats[podStats[i].PodRef.UID] = podStats[i]
}
return func(pod *api.Pod) (statsapi.PodStats, bool) {
return func(pod *v1.Pod) (statsapi.PodStats, bool) {
stats, found := uid2PodStats[string(pod.UID)]
return stats, found
}
@ -431,16 +432,16 @@ func cachedStatsFunc(podStats []statsapi.PodStats) statsFunc {
// 0 if p1 == p2
// +1 if p1 > p2
//
type cmpFunc func(p1, p2 *api.Pod) int
type cmpFunc func(p1, p2 *v1.Pod) int
// multiSorter implements the Sort interface, sorting the pods within.
type multiSorter struct {
pods []*api.Pod
pods []*v1.Pod
cmp []cmpFunc
}
// Sort sorts the argument slice according to the less functions passed to orderedBy.
func (ms *multiSorter) Sort(pods []*api.Pod) {
func (ms *multiSorter) Sort(pods []*v1.Pod) {
ms.pods = pods
sort.Sort(ms)
}
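multiSorter is the standard multi-key comparator pattern: each cmpFunc is consulted in order, and later ones only break ties left by earlier ones. A self-contained sketch of the same pattern over a toy item type (none of the names below are the eviction package's):

package main

import (
	"fmt"
	"sort"
)

type item struct{ qos, usage int }

type cmp func(a, b item) int

type multi struct {
	items []item
	cmps  []cmp
}

func orderedBy(cmps ...cmp) *multi { return &multi{cmps: cmps} }

func (m *multi) Sort(items []item) { m.items = items; sort.Sort(m) }
func (m *multi) Len() int          { return len(m.items) }
func (m *multi) Swap(i, j int)     { m.items[i], m.items[j] = m.items[j], m.items[i] }

// Less walks the comparators in order; only the last one decides outright.
func (m *multi) Less(i, j int) bool {
	a, b := m.items[i], m.items[j]
	for _, c := range m.cmps[:len(m.cmps)-1] {
		if r := c(a, b); r != 0 {
			return r < 0
		}
	}
	return m.cmps[len(m.cmps)-1](a, b) < 0
}

func main() {
	byQoS := func(a, b item) int { return a.qos - b.qos }           // lower QoS class first
	byUsageDesc := func(a, b item) int { return b.usage - a.usage } // greediest first

	items := []item{{qos: 1, usage: 10}, {qos: 0, usage: 5}, {qos: 0, usage: 50}}
	orderedBy(byQoS, byUsageDesc).Sort(items)
	fmt.Println(items) // [{0 50} {0 5} {1 10}]
}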
@ -484,7 +485,7 @@ func (ms *multiSorter) Less(i, j int) bool {
}
// qosComparator compares pods by QoS (BestEffort < Burstable < Guaranteed)
func qosComparator(p1, p2 *api.Pod) int {
func qosComparator(p1, p2 *v1.Pod) int {
qosP1 := qos.GetPodQOS(p1)
qosP2 := qos.GetPodQOS(p2)
// it's a tie
@ -508,7 +509,7 @@ func qosComparator(p1, p2 *api.Pod) int {
// memory compares pods by largest consumer of memory relative to request.
func memory(stats statsFunc) cmpFunc {
return func(p1, p2 *api.Pod) int {
return func(p1, p2 *v1.Pod) int {
p1Stats, found := stats(p1)
// if we have no usage stats for p1, we want p2 first
if !found {
@ -531,12 +532,12 @@ func memory(stats statsFunc) cmpFunc {
}
// adjust p1, p2 usage relative to the request (if any)
p1Memory := p1Usage[api.ResourceMemory]
p1Memory := p1Usage[v1.ResourceMemory]
p1Spec := core.PodUsageFunc(p1)
p1Request := p1Spec[api.ResourceRequestsMemory]
p1Memory.Sub(p1Request)
p2Memory := p2Usage[api.ResourceMemory]
p2Memory := p2Usage[v1.ResourceMemory]
p2Spec := core.PodUsageFunc(p2)
p2Request := p2Spec[api.ResourceRequestsMemory]
p2Memory.Sub(p2Request)
@ -547,8 +548,8 @@ func memory(stats statsFunc) cmpFunc {
}
// disk compares pods by largest consumer of disk relative to request for the specified disk resource.
func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource api.ResourceName) cmpFunc {
return func(p1, p2 *api.Pod) int {
func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) cmpFunc {
return func(p1, p2 *v1.Pod) int {
p1Stats, found := stats(p1)
// if we have no usage stats for p1, we want p2 first
if !found {
@ -580,26 +581,26 @@ func disk(stats statsFunc, fsStatsToMeasure []fsStatsType, diskResource api.Reso
}
// rankMemoryPressure orders the input pods for eviction in response to memory pressure.
func rankMemoryPressure(pods []*api.Pod, stats statsFunc) {
func rankMemoryPressure(pods []*v1.Pod, stats statsFunc) {
orderedBy(qosComparator, memory(stats)).Sort(pods)
}
// rankDiskPressureFunc returns a rankFunc that measures the specified fs stats.
func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType, diskResource api.ResourceName) rankFunc {
return func(pods []*api.Pod, stats statsFunc) {
func rankDiskPressureFunc(fsStatsToMeasure []fsStatsType, diskResource v1.ResourceName) rankFunc {
return func(pods []*v1.Pod, stats statsFunc) {
orderedBy(qosComparator, disk(stats, fsStatsToMeasure, diskResource)).Sort(pods)
}
}
// byEvictionPriority implements sort.Interface for []api.ResourceName.
type byEvictionPriority []api.ResourceName
// byEvictionPriority implements sort.Interface for []v1.ResourceName.
type byEvictionPriority []v1.ResourceName
func (a byEvictionPriority) Len() int { return len(a) }
func (a byEvictionPriority) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
// Less ranks memory before all other resources.
func (a byEvictionPriority) Less(i, j int) bool {
return a[i] == api.ResourceMemory
return a[i] == v1.ResourceMemory
}
// makeSignalObservations derives observations using the specified summary provider.
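For completeness, this is what sorting with byEvictionPriority looks like in isolation: memory is pushed to the front of the eviction order and the other resources follow. Plain strings stand in for v1.ResourceName here:

package main

import (
	"fmt"
	"sort"
)

type byEvictionPriority []string

func (a byEvictionPriority) Len() int      { return len(a) }
func (a byEvictionPriority) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less ranks memory before all other resources, just like the kubelet version.
func (a byEvictionPriority) Less(i, j int) bool { return a[i] == "memory" }

func main() {
	resources := []string{"nodefs", "imagefs", "memory"}
	sort.Sort(byEvictionPriority(resources))
	// memory first; the relative order of the remaining resources is not significant.
	fmt.Println(resources)
}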
@ -740,8 +741,8 @@ func thresholdsMetGracePeriod(observedAt thresholdsObservedAt, now time.Time) []
}
// nodeConditions returns the set of node conditions associated with a threshold
func nodeConditions(thresholds []Threshold) []api.NodeConditionType {
results := []api.NodeConditionType{}
func nodeConditions(thresholds []Threshold) []v1.NodeConditionType {
results := []v1.NodeConditionType{}
for _, threshold := range thresholds {
if nodeCondition, found := signalToNodeCondition[threshold.Signal]; found {
if !hasNodeCondition(results, nodeCondition) {
@ -753,7 +754,7 @@ func nodeConditions(thresholds []Threshold) []api.NodeConditionType {
}
// nodeConditionsLastObservedAt merges the input with the previous observation to determine when a condition was most recently met.
func nodeConditionsLastObservedAt(nodeConditions []api.NodeConditionType, lastObservedAt nodeConditionsObservedAt, now time.Time) nodeConditionsObservedAt {
func nodeConditionsLastObservedAt(nodeConditions []v1.NodeConditionType, lastObservedAt nodeConditionsObservedAt, now time.Time) nodeConditionsObservedAt {
results := nodeConditionsObservedAt{}
// the input conditions were observed "now"
for i := range nodeConditions {
@ -770,8 +771,8 @@ func nodeConditionsLastObservedAt(nodeConditions []api.NodeConditionType, lastOb
}
// nodeConditionsObservedSince returns the set of conditions that have been observed within the specified period
func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period time.Duration, now time.Time) []api.NodeConditionType {
results := []api.NodeConditionType{}
func nodeConditionsObservedSince(observedAt nodeConditionsObservedAt, period time.Duration, now time.Time) []v1.NodeConditionType {
results := []v1.NodeConditionType{}
for nodeCondition, at := range observedAt {
duration := now.Sub(at)
if duration < period {
@ -792,7 +793,7 @@ func hasFsStatsType(inputs []fsStatsType, item fsStatsType) bool {
}
// hasNodeCondition returns true if the node condition is in the input list
func hasNodeCondition(inputs []api.NodeConditionType, item api.NodeConditionType) bool {
func hasNodeCondition(inputs []v1.NodeConditionType, item v1.NodeConditionType) bool {
for _, input := range inputs {
if input == item {
return true
@ -837,8 +838,8 @@ func compareThresholdValue(a ThresholdValue, b ThresholdValue) bool {
}
// getStarvedResources returns the set of resources that are starved based on thresholds met.
func getStarvedResources(thresholds []Threshold) []api.ResourceName {
results := []api.ResourceName{}
func getStarvedResources(thresholds []Threshold) []v1.ResourceName {
results := []v1.ResourceName{}
for _, threshold := range thresholds {
if starvedResource, found := signalToResource[threshold.Signal]; found {
results = append(results, starvedResource)
@ -848,7 +849,7 @@ func getStarvedResources(thresholds []Threshold) []api.ResourceName {
}
// isSoftEvictionThresholds returns true if the thresholds met for the starved resource are only soft thresholds
func isSoftEvictionThresholds(thresholds []Threshold, starvedResource api.ResourceName) bool {
func isSoftEvictionThresholds(thresholds []Threshold, starvedResource v1.ResourceName) bool {
for _, threshold := range thresholds {
if resourceToCheck := signalToResource[threshold.Signal]; resourceToCheck != starvedResource {
continue
@ -866,9 +867,9 @@ func isHardEvictionThreshold(threshold Threshold) bool {
}
// buildResourceToRankFunc returns ranking functions associated with resources
func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc {
resourceToRankFunc := map[api.ResourceName]rankFunc{
api.ResourceMemory: rankMemoryPressure,
func buildResourceToRankFunc(withImageFs bool) map[v1.ResourceName]rankFunc {
resourceToRankFunc := map[v1.ResourceName]rankFunc{
v1.ResourceMemory: rankMemoryPressure,
}
// usage of an imagefs is optional
if withImageFs {
@ -890,13 +891,13 @@ func buildResourceToRankFunc(withImageFs bool) map[api.ResourceName]rankFunc {
}
// PodIsEvicted returns true if the reported pod status is due to an eviction.
func PodIsEvicted(podStatus api.PodStatus) bool {
return podStatus.Phase == api.PodFailed && podStatus.Reason == reason
func PodIsEvicted(podStatus v1.PodStatus) bool {
return podStatus.Phase == v1.PodFailed && podStatus.Reason == reason
}
// buildResourceToNodeReclaimFuncs returns reclaim functions associated with resources.
func buildResourceToNodeReclaimFuncs(imageGC ImageGC, withImageFs bool) map[api.ResourceName]nodeReclaimFuncs {
resourceToReclaimFunc := map[api.ResourceName]nodeReclaimFuncs{}
func buildResourceToNodeReclaimFuncs(imageGC ImageGC, withImageFs bool) map[v1.ResourceName]nodeReclaimFuncs {
resourceToReclaimFunc := map[v1.ResourceName]nodeReclaimFuncs{}
// usage of an imagefs is optional
if withImageFs {
// with an imagefs, nodefs pressure should just delete logs

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/types"
@ -397,20 +398,20 @@ func thresholdEqual(a Threshold, b Threshold) bool {
// TestOrderedByQoS ensures we order BestEffort < Burstable < Guaranteed
func TestOrderedByQoS(t *testing.T) {
bestEffort := newPod("best-effort", []api.Container{
bestEffort := newPod("best-effort", []v1.Container{
newContainer("best-effort", newResourceList("", ""), newResourceList("", "")),
}, nil)
burstable := newPod("burstable", []api.Container{
burstable := newPod("burstable", []v1.Container{
newContainer("burstable", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi")),
}, nil)
guaranteed := newPod("guaranteed", []api.Container{
guaranteed := newPod("guaranteed", []v1.Container{
newContainer("guaranteed", newResourceList("200m", "200Mi"), newResourceList("200m", "200Mi")),
}, nil)
pods := []*api.Pod{guaranteed, burstable, bestEffort}
pods := []*v1.Pod{guaranteed, burstable, bestEffort}
orderedBy(qosComparator).Sort(pods)
expected := []*api.Pod{bestEffort, burstable, guaranteed}
expected := []*v1.Pod{bestEffort, burstable, guaranteed}
for i := range expected {
if pods[i] != expected[i] {
t.Errorf("Expected pod: %s, but got: %s", expected[i].Name, pods[i].Name)
@ -427,51 +428,51 @@ func TestOrderedbyInodes(t *testing.T) {
}
// testOrderedByResource ensures we order pods by greediest consumer of the specified resource
func testOrderedByResource(t *testing.T, orderedByResource api.ResourceName,
newPodStatsFunc func(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) {
pod1 := newPod("best-effort-high", []api.Container{
func testOrderedByResource(t *testing.T, orderedByResource v1.ResourceName,
newPodStatsFunc func(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) {
pod1 := newPod("best-effort-high", []v1.Container{
newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod2 := newPod("best-effort-low", []api.Container{
pod2 := newPod("best-effort-low", []v1.Container{
newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod3 := newPod("burstable-high", []api.Container{
pod3 := newPod("burstable-high", []v1.Container{
newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod4 := newPod("burstable-low", []api.Container{
pod4 := newPod("burstable-low", []v1.Container{
newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod5 := newPod("guaranteed-high", []api.Container{
pod5 := newPod("guaranteed-high", []v1.Container{
newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod6 := newPod("guaranteed-low", []api.Container{
pod6 := newPod("guaranteed-low", []v1.Container{
newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
stats := map[*api.Pod]statsapi.PodStats{
stats := map[*v1.Pod]statsapi.PodStats{
pod1: newPodStatsFunc(pod1, resource.MustParse("50Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 200Mi
pod2: newPodStatsFunc(pod2, resource.MustParse("100Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 300Mi
pod3: newPodStatsFunc(pod3, resource.MustParse("200Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 400Mi
@ -479,13 +480,13 @@ func testOrderedByResource(t *testing.T, orderedByResource api.ResourceName,
pod5: newPodStatsFunc(pod5, resource.MustParse("400Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 550Mi
pod6: newPodStatsFunc(pod6, resource.MustParse("500Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 650Mi
}
statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) {
statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) {
result, found := stats[pod]
return result, found
}
pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
orderedBy(disk(statsFn, []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, orderedByResource)).Sort(pods)
expected := []*api.Pod{pod6, pod5, pod4, pod3, pod2, pod1}
expected := []*v1.Pod{pod6, pod5, pod4, pod3, pod2, pod1}
for i := range expected {
if pods[i] != expected[i] {
t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name)
@ -502,51 +503,51 @@ func TestOrderedbyQoSInodes(t *testing.T) {
}
// testOrderedByQoSResource ensures we order pods by QoS and then by greediest consumer of the specified resource
func testOrderedByQoSResource(t *testing.T, orderedByResource api.ResourceName,
newPodStatsFunc func(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) {
pod1 := newPod("best-effort-high", []api.Container{
func testOrderedByQoSResource(t *testing.T, orderedByResource v1.ResourceName,
newPodStatsFunc func(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats) {
pod1 := newPod("best-effort-high", []v1.Container{
newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod2 := newPod("best-effort-low", []api.Container{
pod2 := newPod("best-effort-low", []v1.Container{
newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod3 := newPod("burstable-high", []api.Container{
pod3 := newPod("burstable-high", []v1.Container{
newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod4 := newPod("burstable-low", []api.Container{
pod4 := newPod("burstable-low", []v1.Container{
newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod5 := newPod("guaranteed-high", []api.Container{
pod5 := newPod("guaranteed-high", []v1.Container{
newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
pod6 := newPod("guaranteed-low", []api.Container{
pod6 := newPod("guaranteed-low", []v1.Container{
newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, []api.Volume{
newVolume("local-volume", api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{},
}, []v1.Volume{
newVolume("local-volume", v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
}),
})
stats := map[*api.Pod]statsapi.PodStats{
stats := map[*v1.Pod]statsapi.PodStats{
pod1: newPodStatsFunc(pod1, resource.MustParse("50Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 200Mi
pod2: newPodStatsFunc(pod2, resource.MustParse("100Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 300Mi
pod3: newPodStatsFunc(pod3, resource.MustParse("200Mi"), resource.MustParse("150Mi"), resource.MustParse("50Mi")), // 400Mi
@ -554,13 +555,13 @@ func testOrderedByQoSResource(t *testing.T, orderedByResource api.ResourceName,
pod5: newPodStatsFunc(pod5, resource.MustParse("400Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 550Mi
pod6: newPodStatsFunc(pod6, resource.MustParse("500Mi"), resource.MustParse("100Mi"), resource.MustParse("50Mi")), // 650Mi
}
statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) {
statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) {
result, found := stats[pod]
return result, found
}
pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
orderedBy(qosComparator, disk(statsFn, []fsStatsType{fsStatsRoot, fsStatsLogs, fsStatsLocalVolumeSource}, orderedByResource)).Sort(pods)
expected := []*api.Pod{pod2, pod1, pod4, pod3, pod6, pod5}
expected := []*v1.Pod{pod2, pod1, pod4, pod3, pod6, pod5}
for i := range expected {
if pods[i] != expected[i] {
t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name)
@ -570,25 +571,25 @@ func testOrderedByQoSResource(t *testing.T, orderedByResource api.ResourceName,
// TestOrderedByMemory ensures we order pods by greediest memory consumer relative to request.
func TestOrderedByMemory(t *testing.T) {
pod1 := newPod("best-effort-high", []api.Container{
pod1 := newPod("best-effort-high", []v1.Container{
newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")),
}, nil)
pod2 := newPod("best-effort-low", []api.Container{
pod2 := newPod("best-effort-low", []v1.Container{
newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")),
}, nil)
pod3 := newPod("burstable-high", []api.Container{
pod3 := newPod("burstable-high", []v1.Container{
newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, nil)
pod4 := newPod("burstable-low", []api.Container{
pod4 := newPod("burstable-low", []v1.Container{
newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, nil)
pod5 := newPod("guaranteed-high", []api.Container{
pod5 := newPod("guaranteed-high", []v1.Container{
newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, nil)
pod6 := newPod("guaranteed-low", []api.Container{
pod6 := newPod("guaranteed-low", []v1.Container{
newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, nil)
stats := map[*api.Pod]statsapi.PodStats{
stats := map[*v1.Pod]statsapi.PodStats{
pod1: newPodMemoryStats(pod1, resource.MustParse("500Mi")), // 500 relative to request
pod2: newPodMemoryStats(pod2, resource.MustParse("300Mi")), // 300 relative to request
pod3: newPodMemoryStats(pod3, resource.MustParse("800Mi")), // 700 relative to request
@ -596,13 +597,13 @@ func TestOrderedByMemory(t *testing.T) {
pod5: newPodMemoryStats(pod5, resource.MustParse("800Mi")), // -200 relative to request
pod6: newPodMemoryStats(pod6, resource.MustParse("200Mi")), // -800 relative to request
}
statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) {
statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) {
result, found := stats[pod]
return result, found
}
pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
orderedBy(memory(statsFn)).Sort(pods)
expected := []*api.Pod{pod3, pod1, pod2, pod4, pod5, pod6}
expected := []*v1.Pod{pod3, pod1, pod2, pod4, pod5, pod6}
for i := range expected {
if pods[i] != expected[i] {
t.Errorf("Expected pod[%d]: %s, but got: %s", i, expected[i].Name, pods[i].Name)
@ -612,25 +613,25 @@ func TestOrderedByMemory(t *testing.T) {
// TestOrderedByQoSMemory ensures we order by qosComparator and then memory consumption relative to request.
func TestOrderedByQoSMemory(t *testing.T) {
pod1 := newPod("best-effort-high", []api.Container{
pod1 := newPod("best-effort-high", []v1.Container{
newContainer("best-effort-high", newResourceList("", ""), newResourceList("", "")),
}, nil)
pod2 := newPod("best-effort-low", []api.Container{
pod2 := newPod("best-effort-low", []v1.Container{
newContainer("best-effort-low", newResourceList("", ""), newResourceList("", "")),
}, nil)
pod3 := newPod("burstable-high", []api.Container{
pod3 := newPod("burstable-high", []v1.Container{
newContainer("burstable-high", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, nil)
pod4 := newPod("burstable-low", []api.Container{
pod4 := newPod("burstable-low", []v1.Container{
newContainer("burstable-low", newResourceList("100m", "100Mi"), newResourceList("200m", "1Gi")),
}, nil)
pod5 := newPod("guaranteed-high", []api.Container{
pod5 := newPod("guaranteed-high", []v1.Container{
newContainer("guaranteed-high", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, nil)
pod6 := newPod("guaranteed-low", []api.Container{
pod6 := newPod("guaranteed-low", []v1.Container{
newContainer("guaranteed-low", newResourceList("100m", "1Gi"), newResourceList("100m", "1Gi")),
}, nil)
stats := map[*api.Pod]statsapi.PodStats{
stats := map[*v1.Pod]statsapi.PodStats{
pod1: newPodMemoryStats(pod1, resource.MustParse("500Mi")), // 500 relative to request
pod2: newPodMemoryStats(pod2, resource.MustParse("50Mi")), // 50 relative to request
pod3: newPodMemoryStats(pod3, resource.MustParse("50Mi")), // -50 relative to request
@ -638,12 +639,12 @@ func TestOrderedByQoSMemory(t *testing.T) {
pod5: newPodMemoryStats(pod5, resource.MustParse("800Mi")), // -200 relative to request
pod6: newPodMemoryStats(pod6, resource.MustParse("200Mi")), // -800 relative to request
}
statsFn := func(pod *api.Pod) (statsapi.PodStats, bool) {
statsFn := func(pod *v1.Pod) (statsapi.PodStats, bool) {
result, found := stats[pod]
return result, found
}
pods := []*api.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
expected := []*api.Pod{pod1, pod2, pod4, pod3, pod5, pod6}
pods := []*v1.Pod{pod1, pod2, pod3, pod4, pod5, pod6}
expected := []*v1.Pod{pod1, pod2, pod4, pod3, pod5, pod6}
orderedBy(qosComparator, memory(statsFn)).Sort(pods)
for i := range expected {
if pods[i] != expected[i] {
@ -662,7 +663,7 @@ func (f *fakeSummaryProvider) Get() (*statsapi.Summary, error) {
// newPodStats returns pod stats where each container is using the specified working set.
// Each pod must have a Name, UID, and Namespace.
func newPodStats(pod *api.Pod, containerWorkingSetBytes int64) statsapi.PodStats {
func newPodStats(pod *v1.Pod, containerWorkingSetBytes int64) statsapi.PodStats {
result := statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: pod.Name,
@ -682,14 +683,14 @@ func newPodStats(pod *api.Pod, containerWorkingSetBytes int64) statsapi.PodStats
}
func TestMakeSignalObservations(t *testing.T) {
podMaker := func(name, namespace, uid string, numContainers int) *api.Pod {
pod := &api.Pod{}
podMaker := func(name, namespace, uid string, numContainers int) *v1.Pod {
pod := &v1.Pod{}
pod.Name = name
pod.Namespace = namespace
pod.UID = types.UID(uid)
pod.Spec = api.PodSpec{}
pod.Spec = v1.PodSpec{}
for i := 0; i < numContainers; i++ {
pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: fmt.Sprintf("ctr%v", i),
})
}
@ -731,7 +732,7 @@ func TestMakeSignalObservations(t *testing.T) {
provider := &fakeSummaryProvider{
result: fakeStats,
}
pods := []*api.Pod{
pods := []*v1.Pod{
podMaker("pod1", "ns1", "uuid1", 1),
podMaker("pod1", "ns2", "uuid2", 1),
podMaker("pod3", "ns3", "uuid3", 1),
@ -1199,17 +1200,17 @@ func TestThresholdsMetGracePeriod(t *testing.T) {
func TestNodeConditions(t *testing.T) {
testCases := map[string]struct {
inputs []Threshold
result []api.NodeConditionType
result []v1.NodeConditionType
}{
"empty-list": {
inputs: []Threshold{},
result: []api.NodeConditionType{},
result: []v1.NodeConditionType{},
},
"memory.available": {
inputs: []Threshold{
{Signal: SignalMemoryAvailable},
},
result: []api.NodeConditionType{api.NodeMemoryPressure},
result: []v1.NodeConditionType{v1.NodeMemoryPressure},
},
}
for testName, testCase := range testCases {
@ -1224,37 +1225,37 @@ func TestNodeConditionsLastObservedAt(t *testing.T) {
now := unversioned.Now()
oldTime := unversioned.NewTime(now.Time.Add(-1 * time.Minute))
testCases := map[string]struct {
nodeConditions []api.NodeConditionType
nodeConditions []v1.NodeConditionType
lastObservedAt nodeConditionsObservedAt
now time.Time
result nodeConditionsObservedAt
}{
"no-previous-observation": {
nodeConditions: []api.NodeConditionType{api.NodeMemoryPressure},
nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure},
lastObservedAt: nodeConditionsObservedAt{},
now: now.Time,
result: nodeConditionsObservedAt{
api.NodeMemoryPressure: now.Time,
v1.NodeMemoryPressure: now.Time,
},
},
"previous-observation": {
nodeConditions: []api.NodeConditionType{api.NodeMemoryPressure},
nodeConditions: []v1.NodeConditionType{v1.NodeMemoryPressure},
lastObservedAt: nodeConditionsObservedAt{
api.NodeMemoryPressure: oldTime.Time,
v1.NodeMemoryPressure: oldTime.Time,
},
now: now.Time,
result: nodeConditionsObservedAt{
api.NodeMemoryPressure: now.Time,
v1.NodeMemoryPressure: now.Time,
},
},
"old-observation": {
nodeConditions: []api.NodeConditionType{},
nodeConditions: []v1.NodeConditionType{},
lastObservedAt: nodeConditionsObservedAt{
api.NodeMemoryPressure: oldTime.Time,
v1.NodeMemoryPressure: oldTime.Time,
},
now: now.Time,
result: nodeConditionsObservedAt{
api.NodeMemoryPressure: oldTime.Time,
v1.NodeMemoryPressure: oldTime.Time,
},
},
}
@ -1273,23 +1274,23 @@ func TestNodeConditionsObservedSince(t *testing.T) {
observedAt nodeConditionsObservedAt
period time.Duration
now time.Time
result []api.NodeConditionType
result []v1.NodeConditionType
}{
"in-period": {
observedAt: nodeConditionsObservedAt{
api.NodeMemoryPressure: observedTime.Time,
v1.NodeMemoryPressure: observedTime.Time,
},
period: 2 * time.Minute,
now: now.Time,
result: []api.NodeConditionType{api.NodeMemoryPressure},
result: []v1.NodeConditionType{v1.NodeMemoryPressure},
},
"out-of-period": {
observedAt: nodeConditionsObservedAt{
api.NodeMemoryPressure: observedTime.Time,
v1.NodeMemoryPressure: observedTime.Time,
},
period: 30 * time.Second,
now: now.Time,
result: []api.NodeConditionType{},
result: []v1.NodeConditionType{},
},
}
for testName, testCase := range testCases {
@ -1302,18 +1303,18 @@ func TestNodeConditionsObservedSince(t *testing.T) {
func TestHasNodeConditions(t *testing.T) {
testCases := map[string]struct {
inputs []api.NodeConditionType
item api.NodeConditionType
inputs []v1.NodeConditionType
item v1.NodeConditionType
result bool
}{
"has-condition": {
inputs: []api.NodeConditionType{api.NodeReady, api.NodeOutOfDisk, api.NodeMemoryPressure},
item: api.NodeMemoryPressure,
inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeOutOfDisk, v1.NodeMemoryPressure},
item: v1.NodeMemoryPressure,
result: true,
},
"does-not-have-condition": {
inputs: []api.NodeConditionType{api.NodeReady, api.NodeOutOfDisk},
item: api.NodeMemoryPressure,
inputs: []v1.NodeConditionType{v1.NodeReady, v1.NodeOutOfDisk},
item: v1.NodeMemoryPressure,
result: false,
},
}
@ -1327,31 +1328,38 @@ func TestHasNodeConditions(t *testing.T) {
func TestGetStarvedResources(t *testing.T) {
testCases := map[string]struct {
inputs []Threshold
result []api.ResourceName
result []v1.ResourceName
}{
"memory.available": {
inputs: []Threshold{
{Signal: SignalMemoryAvailable},
},
result: []api.ResourceName{api.ResourceMemory},
result: []v1.ResourceName{v1.ResourceMemory},
},
"imagefs.available": {
inputs: []Threshold{
{Signal: SignalImageFsAvailable},
},
result: []api.ResourceName{resourceImageFs},
result: []v1.ResourceName{resourceImageFs},
},
"nodefs.available": {
inputs: []Threshold{
{Signal: SignalNodeFsAvailable},
},
result: []api.ResourceName{resourceNodeFs},
result: []v1.ResourceName{resourceNodeFs},
},
}
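// quota.ToSet operates on the internal api.ResourceName type, so convert the v1 names
// before building the comparison sets.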
var internalResourceNames = func(in []v1.ResourceName) []api.ResourceName {
var out []api.ResourceName
for _, name := range in {
out = append(out, api.ResourceName(name))
}
return out
}
for testName, testCase := range testCases {
actual := getStarvedResources(testCase.inputs)
actualSet := quota.ToSet(actual)
expectedSet := quota.ToSet(testCase.result)
actualSet := quota.ToSet(internalResourceNames(actual))
expectedSet := quota.ToSet(internalResourceNames(testCase.result))
if !actualSet.Equal(expectedSet) {
t.Errorf("Test case: %s, expected: %v, actual: %v", testName, expectedSet, actualSet)
}
@ -1448,7 +1456,7 @@ func testCompareThresholdValue(t *testing.T) {
}
// newPodInodeStats returns stats with specified usage amounts.
func newPodInodeStats(pod *api.Pod, rootFsInodesUsed, logsInodesUsed, perLocalVolumeInodesUsed resource.Quantity) statsapi.PodStats {
func newPodInodeStats(pod *v1.Pod, rootFsInodesUsed, logsInodesUsed, perLocalVolumeInodesUsed resource.Quantity) statsapi.PodStats {
result := statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
@ -1480,7 +1488,7 @@ func newPodInodeStats(pod *api.Pod, rootFsInodesUsed, logsInodesUsed, perLocalVo
}
// newPodDiskStats returns stats with specified usage amounts.
func newPodDiskStats(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats {
func newPodDiskStats(pod *v1.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed resource.Quantity) statsapi.PodStats {
result := statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
@ -1513,7 +1521,7 @@ func newPodDiskStats(pod *api.Pod, rootFsUsed, logsUsed, perLocalVolumeUsed reso
return result
}
func newPodMemoryStats(pod *api.Pod, workingSet resource.Quantity) statsapi.PodStats {
func newPodMemoryStats(pod *v1.Pod, workingSet resource.Quantity) statsapi.PodStats {
result := statsapi.PodStats{
PodRef: statsapi.PodReference{
Name: pod.Name, Namespace: pod.Namespace, UID: string(pod.UID),
@ -1530,46 +1538,46 @@ func newPodMemoryStats(pod *api.Pod, workingSet resource.Quantity) statsapi.PodS
return result
}
func newResourceList(cpu, memory string) api.ResourceList {
res := api.ResourceList{}
func newResourceList(cpu, memory string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[api.ResourceCPU] = resource.MustParse(cpu)
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[api.ResourceMemory] = resource.MustParse(memory)
res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}
func newResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
res := api.ResourceRequirements{}
func newResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
}
func newContainer(name string, requests api.ResourceList, limits api.ResourceList) api.Container {
return api.Container{
func newContainer(name string, requests v1.ResourceList, limits v1.ResourceList) v1.Container {
return v1.Container{
Name: name,
Resources: newResourceRequirements(requests, limits),
}
}
func newVolume(name string, volumeSource api.VolumeSource) api.Volume {
return api.Volume{
func newVolume(name string, volumeSource v1.VolumeSource) v1.Volume {
return v1.Volume{
Name: name,
VolumeSource: volumeSource,
}
}
// newPod uses the name as the uid. Make names unique for testing.
func newPod(name string, containers []api.Container, volumes []api.Volume) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
func newPod(name string, containers []v1.Container, volumes []v1.Volume) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
UID: types.UID(name),
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Containers: containers,
Volumes: volumes,
},
@ -1577,7 +1585,7 @@ func newPod(name string, containers []api.Container, volumes []api.Volume) *api.
}
// nodeConditionList is a simple alias to support equality checking independent of order
type nodeConditionList []api.NodeConditionType
type nodeConditionList []v1.NodeConditionType
// Equal adds the ability to check equality between two lists of node conditions.
func (s1 nodeConditionList) Equal(s2 nodeConditionList) bool {

View File

@ -19,9 +19,9 @@ package eviction
import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
)
@ -126,16 +126,16 @@ type ImageGC interface {
// pod - the pod to kill
// status - the desired status to associate with the pod (i.e. why it's killed)
// gracePeriodOverride - the grace period override to use instead of what is on the pod spec
type KillPodFunc func(pod *api.Pod, status api.PodStatus, gracePeriodOverride *int64) error
type KillPodFunc func(pod *v1.Pod, status v1.PodStatus, gracePeriodOverride *int64) error
// ActivePodsFunc returns pods bound to the kubelet that are active (i.e. non-terminal state)
type ActivePodsFunc func() []*api.Pod
type ActivePodsFunc func() []*v1.Pod
// statsFunc returns the usage stats if known for an input pod.
type statsFunc func(pod *api.Pod) (statsapi.PodStats, bool)
type statsFunc func(pod *v1.Pod) (statsapi.PodStats, bool)
// rankFunc sorts the pods in eviction order
type rankFunc func(pods []*api.Pod, stats statsFunc)
type rankFunc func(pods []*v1.Pod, stats statsFunc)
// signalObservation is the observed resource usage
type signalObservation struct {
@ -154,7 +154,7 @@ type signalObservations map[Signal]signalObservation
type thresholdsObservedAt map[Threshold]time.Time
// nodeConditionsObservedAt maps a node condition to a time that it was observed
type nodeConditionsObservedAt map[api.NodeConditionType]time.Time
type nodeConditionsObservedAt map[v1.NodeConditionType]time.Time
// nodeReclaimFunc is a function that knows how to reclaim a resource from the node without impacting pods.
type nodeReclaimFunc func() (*resource.Quantity, error)
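// Illustration (not part of this commit): a rankFunc for memory pressure can be built from
// the orderedBy, qosComparator, and memory helpers exercised by the eviction tests above.
var rankMemoryPressureExample rankFunc = func(pods []*v1.Pod, stats statsFunc) {
	orderedBy(qosComparator, memory(stats)).Sort(pods)
}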

View File

@ -19,7 +19,7 @@ package images
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/flowcontrol"
)
@ -42,7 +42,7 @@ type throttledImageService struct {
limiter flowcontrol.RateLimiter
}
func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []api.Secret) error {
func (ts throttledImageService) PullImage(image kubecontainer.ImageSpec, secrets []v1.Secret) error {
if ts.limiter.TryAccept() {
return ts.ImageService.PullImage(image, secrets)
}

View File

@ -24,7 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/container"
@ -85,7 +85,7 @@ type realImageGCManager struct {
recorder record.EventRecorder
// Reference to this node.
nodeRef *api.ObjectReference
nodeRef *v1.ObjectReference
// Track initialization
initialized bool
@ -103,7 +103,7 @@ type imageRecord struct {
size int64
}
func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *api.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
func NewImageGCManager(runtime container.Runtime, cadvisorInterface cadvisor.Interface, recorder record.EventRecorder, nodeRef *v1.ObjectReference, policy ImageGCPolicy) (ImageGCManager, error) {
// Validate policy.
if policy.HighThresholdPercent < 0 || policy.HighThresholdPercent > 100 {
return nil, fmt.Errorf("invalid HighThresholdPercent %d, must be in range [0-100]", policy.HighThresholdPercent)
@ -227,7 +227,7 @@ func (im *realImageGCManager) GarbageCollect() error {
// Check valid capacity.
if capacity == 0 {
err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
return err
}
@ -243,7 +243,7 @@ func (im *realImageGCManager) GarbageCollect() error {
if freed < amountToFree {
err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d, but freed %d", amountToFree, freed)
im.recorder.Eventf(im.nodeRef, api.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())
im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())
return err
}
}

View File

@ -21,7 +21,7 @@ import (
dockerref "github.com/docker/distribution/reference"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
@ -59,13 +59,13 @@ func NewImageManager(recorder record.EventRecorder, imageService kubecontainer.I
// shouldPullImage returns whether we should pull an image according to
// the presence and pull policy of the image.
func shouldPullImage(container *api.Container, imagePresent bool) bool {
if container.ImagePullPolicy == api.PullNever {
func shouldPullImage(container *v1.Container, imagePresent bool) bool {
if container.ImagePullPolicy == v1.PullNever {
return false
}
if container.ImagePullPolicy == api.PullAlways ||
(container.ImagePullPolicy == api.PullIfNotPresent && (!imagePresent)) {
if container.ImagePullPolicy == v1.PullAlways ||
(container.ImagePullPolicy == v1.PullIfNotPresent && (!imagePresent)) {
return true
}
@ -73,7 +73,7 @@ func shouldPullImage(container *api.Container, imagePresent bool) bool {
}
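// Illustration (not part of this commit), with a hypothetical container, of how the policy
// check above resolves for v1.PullIfNotPresent:
//
//	c := &v1.Container{Image: "example/image:1.0", ImagePullPolicy: v1.PullIfNotPresent}
//	shouldPullImage(c, false) // true:  image absent, pull it
//	shouldPullImage(c, true)  // false: image already present, skip the pull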
// logIt records an event using ref and the event message, and logs to glog with prefix, msg, and logFn.
func (m *imageManager) logIt(ref *api.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
func (m *imageManager) logIt(ref *v1.ObjectReference, eventtype, event, prefix, msg string, logFn func(args ...interface{})) {
if ref != nil {
m.recorder.Event(ref, eventtype, event, msg)
} else {
@ -82,7 +82,7 @@ func (m *imageManager) logIt(ref *api.ObjectReference, eventtype, event, prefix,
}
// EnsureImageExists pulls the image for the specified pod and container.
func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string) {
func (m *imageManager) EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string) {
logPrefix := fmt.Sprintf("%s/%s", pod.Name, container.Image)
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
@ -93,7 +93,7 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container,
image, err := applyDefaultImageTag(container.Image)
if err != nil {
msg := fmt.Sprintf("Failed to apply default image tag %q: %v", container.Image, err)
m.logIt(ref, api.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
return ErrInvalidImageName, msg
}
@ -101,18 +101,18 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container,
present, err := m.imageService.IsImagePresent(spec)
if err != nil {
msg := fmt.Sprintf("Failed to inspect image %q: %v", container.Image, err)
m.logIt(ref, api.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
m.logIt(ref, v1.EventTypeWarning, events.FailedToInspectImage, logPrefix, msg, glog.Warning)
return ErrImageInspect, msg
}
if !shouldPullImage(container, present) {
if present {
msg := fmt.Sprintf("Container image %q already present on machine", container.Image)
m.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, msg, glog.Info)
return nil, ""
} else {
msg := fmt.Sprintf("Container image %q is not present with pull policy of Never", container.Image)
m.logIt(ref, api.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
m.logIt(ref, v1.EventTypeWarning, events.ErrImageNeverPullPolicy, logPrefix, msg, glog.Warning)
return ErrImageNeverPull, msg
}
}
@ -120,14 +120,14 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container,
backOffKey := fmt.Sprintf("%s_%s", pod.UID, container.Image)
if m.backOff.IsInBackOffSinceUpdate(backOffKey, m.backOff.Clock.Now()) {
msg := fmt.Sprintf("Back-off pulling image %q", container.Image)
m.logIt(ref, api.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info)
m.logIt(ref, v1.EventTypeNormal, events.BackOffPullImage, logPrefix, msg, glog.Info)
return ErrImagePullBackOff, msg
}
m.logIt(ref, api.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
m.logIt(ref, v1.EventTypeNormal, events.PullingImage, logPrefix, fmt.Sprintf("pulling image %q", container.Image), glog.Info)
errChan := make(chan error)
m.puller.pullImage(spec, pullSecrets, errChan)
if err := <-errChan; err != nil {
m.logIt(ref, api.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
m.logIt(ref, v1.EventTypeWarning, events.FailedToPullImage, logPrefix, fmt.Sprintf("Failed to pull image %q: %v", container.Image, err), glog.Warning)
m.backOff.Next(backOffKey, m.backOff.Clock.Now())
if err == RegistryUnavailable {
msg := fmt.Sprintf("image pull failed for %s because the registry is unavailable.", container.Image)
@ -136,7 +136,7 @@ func (m *imageManager) EnsureImageExists(pod *api.Pod, container *api.Container,
return ErrImagePull, err.Error()
}
}
m.logIt(ref, api.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
m.logIt(ref, v1.EventTypeNormal, events.PulledImage, logPrefix, fmt.Sprintf("Successfully pulled image %q", container.Image), glog.Info)
m.backOff.GC()
return nil, ""
}

View File

@ -22,7 +22,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
. "k8s.io/kubernetes/pkg/kubelet/container"
ctest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@ -31,8 +31,8 @@ import (
)
func TestParallelPuller(t *testing.T) {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test-ns",
UID: "bar",
@ -42,7 +42,7 @@ func TestParallelPuller(t *testing.T) {
cases := []struct {
containerImage string
policy api.PullPolicy
policy v1.PullPolicy
calledFunctions []string
inspectErr error
pullerErr error
@ -50,7 +50,7 @@ func TestParallelPuller(t *testing.T) {
}{
{ // pull missing image
containerImage: "missing_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: nil,
@ -58,35 +58,35 @@ func TestParallelPuller(t *testing.T) {
{ // image present, don't pull
containerImage: "present_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil, nil, nil}},
// image present, pull it
{containerImage: "present_image",
policy: api.PullAlways,
policy: v1.PullAlways,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil, nil, nil}},
// missing image, error PullNever
{containerImage: "missing_image",
policy: api.PullNever,
policy: v1.PullNever,
calledFunctions: []string{"IsImagePresent"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
// missing image, unable to inspect
{containerImage: "missing_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent"},
inspectErr: errors.New("unknown inspectError"),
pullerErr: nil,
expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
// missing image, unable to fetch
{containerImage: "typo_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: errors.New("404"),
@ -94,7 +94,7 @@ func TestParallelPuller(t *testing.T) {
}
for i, c := range cases {
container := &api.Container{
container := &v1.Container{
Name: "container_name",
Image: c.containerImage,
ImagePullPolicy: c.policy,
@ -122,8 +122,8 @@ func TestParallelPuller(t *testing.T) {
}
func TestSerializedPuller(t *testing.T) {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test-ns",
UID: "bar",
@ -133,7 +133,7 @@ func TestSerializedPuller(t *testing.T) {
cases := []struct {
containerImage string
policy api.PullPolicy
policy v1.PullPolicy
calledFunctions []string
inspectErr error
pullerErr error
@ -141,7 +141,7 @@ func TestSerializedPuller(t *testing.T) {
}{
{ // pull missing image
containerImage: "missing_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: nil,
@ -149,35 +149,35 @@ func TestSerializedPuller(t *testing.T) {
{ // image present, don't pull
containerImage: "present_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil, nil, nil}},
// image present, pull it
{containerImage: "present_image",
policy: api.PullAlways,
policy: v1.PullAlways,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil, nil, nil}},
// missing image, error PullNever
{containerImage: "missing_image",
policy: api.PullNever,
policy: v1.PullNever,
calledFunctions: []string{"IsImagePresent"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
// missing image, unable to inspect
{containerImage: "missing_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent"},
inspectErr: errors.New("unknown inspectError"),
pullerErr: nil,
expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
// missing image, unable to fetch
{containerImage: "typo_image",
policy: api.PullIfNotPresent,
policy: v1.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: errors.New("404"),
@ -185,7 +185,7 @@ func TestSerializedPuller(t *testing.T) {
}
for i, c := range cases {
container := &api.Container{
container := &v1.Container{
Name: "container_name",
Image: c.containerImage,
ImagePullPolicy: c.policy,

View File

@ -19,13 +19,13 @@ package images
import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/wait"
)
type imagePuller interface {
pullImage(kubecontainer.ImageSpec, []api.Secret, chan<- error)
pullImage(kubecontainer.ImageSpec, []v1.Secret, chan<- error)
}
var _, _ imagePuller = &parallelImagePuller{}, &serialImagePuller{}
@ -38,7 +38,7 @@ func newParallelImagePuller(imageService kubecontainer.ImageService) imagePuller
return &parallelImagePuller{imageService}
}
func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []api.Secret, errChan chan<- error) {
func (pip *parallelImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, errChan chan<- error) {
go func() {
errChan <- pip.imageService.PullImage(spec, pullSecrets)
}()
@ -60,11 +60,11 @@ func newSerialImagePuller(imageService kubecontainer.ImageService) imagePuller {
type imagePullRequest struct {
spec kubecontainer.ImageSpec
pullSecrets []api.Secret
pullSecrets []v1.Secret
errChan chan<- error
}
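// Illustration (not part of this commit): the loop that drains pullRequests falls outside
// this hunk. Assuming the serial puller also holds an imageService field (as the parallel
// puller above does), a minimal drain loop would look like this.
func (sip *serialImagePuller) processImagePullRequestsExample() {
	for pullRequest := range sip.pullRequests {
		pullRequest.errChan <- sip.imageService.PullImage(pullRequest.spec, pullRequest.pullSecrets)
	}
}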
func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []api.Secret, errChan chan<- error) {
func (sip *serialImagePuller) pullImage(spec kubecontainer.ImageSpec, pullSecrets []v1.Secret, errChan chan<- error) {
sip.pullRequests <- &imagePullRequest{
spec: spec,
pullSecrets: pullSecrets,

View File

@ -19,7 +19,7 @@ package images
import (
"errors"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
var (
@ -49,7 +49,7 @@ var (
// Implementations are expected to be thread safe.
type ImageManager interface {
// EnsureImageExists ensures that the image specified in `container` exists.
EnsureImageExists(pod *api.Pod, container *api.Container, pullSecrets []api.Secret) (error, string)
EnsureImageExists(pod *v1.Pod, container *v1.Container, pullSecrets []v1.Secret) (error, string)
// TODO(ronl): consolidate image management and deletion operations in this interface
}

View File

@ -33,10 +33,11 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
@ -152,11 +153,11 @@ const (
// SyncHandler is an interface implemented by Kubelet, for testability
type SyncHandler interface {
HandlePodAdditions(pods []*api.Pod)
HandlePodUpdates(pods []*api.Pod)
HandlePodRemoves(pods []*api.Pod)
HandlePodReconcile(pods []*api.Pod)
HandlePodSyncs(pods []*api.Pod)
HandlePodAdditions(pods []*v1.Pod)
HandlePodUpdates(pods []*v1.Pod)
HandlePodRemoves(pods []*v1.Pod)
HandlePodReconcile(pods []*v1.Pod)
HandlePodSyncs(pods []*v1.Pod)
HandlePodCleanups() error
}
@ -330,8 +331,8 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
MaxContainers: int(kubeCfg.MaxContainerCount),
}
daemonEndpoints := &api.NodeDaemonEndpoints{
KubeletEndpoint: api.DaemonEndpoint{Port: kubeCfg.Port},
daemonEndpoints := &v1.NodeDaemonEndpoints{
KubeletEndpoint: v1.DaemonEndpoint{Port: kubeCfg.Port},
}
imageGCPolicy := images.ImageGCPolicy{
@ -373,16 +374,16 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
serviceStore := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
if kubeClient != nil {
serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", api.NamespaceAll, fields.Everything())
cache.NewReflector(serviceLW, &api.Service{}, serviceStore, 0).Run()
serviceLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "services", v1.NamespaceAll, fields.Everything())
cache.NewReflector(serviceLW, &v1.Service{}, serviceStore, 0).Run()
}
serviceLister := &cache.StoreToServiceLister{Indexer: serviceStore}
nodeStore := cache.NewStore(cache.MetaNamespaceKeyFunc)
if kubeClient != nil {
fieldSelector := fields.Set{api.ObjectNameField: string(nodeName)}.AsSelector()
nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", api.NamespaceAll, fieldSelector)
cache.NewReflector(nodeLW, &api.Node{}, nodeStore, 0).Run()
nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fieldSelector)
cache.NewReflector(nodeLW, &v1.Node{}, nodeStore, 0).Run()
}
nodeLister := &cache.StoreToNodeLister{Store: nodeStore}
nodeInfo := &predicates.CachedNodeInfo{StoreToNodeLister: nodeLister}
@ -390,7 +391,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
// TODO: get the real node object of ourself,
// and use the real node name and UID.
// TODO: what is namespace for node?
nodeRef := &api.ObjectReference{
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string(nodeName),
UID: types.UID(nodeName),
@ -764,14 +765,14 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
if err != nil {
return nil, err
}
safeWhitelist, err := sysctl.NewWhitelist(sysctl.SafeSysctlWhitelist(), api.SysctlsPodAnnotationKey)
safeWhitelist, err := sysctl.NewWhitelist(sysctl.SafeSysctlWhitelist(), v1.SysctlsPodAnnotationKey)
if err != nil {
return nil, err
}
// Safe, whitelisted sysctls can always be used as unsafe sysctls in the spec
// Hence, we concatenate those two lists.
safeAndUnsafeSysctls := append(sysctl.SafeSysctlWhitelist(), kubeCfg.AllowedUnsafeSysctls...)
unsafeWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls, api.UnsafeSysctlsPodAnnotationKey)
unsafeWhitelist, err := sysctl.NewWhitelist(safeAndUnsafeSysctls, v1.UnsafeSysctlsPodAnnotationKey)
if err != nil {
return nil, err
}
@ -803,11 +804,11 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
}
type serviceLister interface {
List(labels.Selector) ([]*api.Service, error)
List(labels.Selector) ([]*v1.Service, error)
}
type nodeLister interface {
List() (machines api.NodeList, err error)
List() (machines v1.NodeList, err error)
}
// Kubelet is the main kubelet implementation.
@ -928,7 +929,7 @@ type Kubelet struct {
autoDetectCloudProvider bool
// Reference to this node.
nodeRef *api.ObjectReference
nodeRef *v1.ObjectReference
// Container runtime.
containerRuntime kubecontainer.Runtime
@ -1012,7 +1013,7 @@ type Kubelet struct {
cpuCFSQuota bool
// Information about the ports which are opened by daemons on Node running this Kubelet server.
daemonEndpoints *api.NodeDaemonEndpoints
daemonEndpoints *v1.NodeDaemonEndpoints
// A queue used to trigger pod workers.
workQueue queue.WorkQueue
@ -1049,7 +1050,7 @@ type Kubelet struct {
babysitDaemons bool
// handlers called during the tryUpdateNodeStatus cycle
setNodeStatusFuncs []func(*api.Node) error
setNodeStatusFuncs []func(*v1.Node) error
// TODO: think about moving this to be centralized in PodWorkers in follow-on.
// the list of handlers to call during pod admission.
@ -1125,7 +1126,7 @@ func (kl *Kubelet) StartGarbageCollection() {
go wait.Until(func() {
if err := kl.containerGC.GarbageCollect(kl.sourcesReady.AllReady()); err != nil {
glog.Errorf("Container garbage collection failed: %v", err)
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.ContainerGCFailed, err.Error())
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ContainerGCFailed, err.Error())
loggedContainerGCFailure = true
} else {
var vLevel glog.Level = 4
@ -1142,7 +1143,7 @@ func (kl *Kubelet) StartGarbageCollection() {
go wait.Until(func() {
if err := kl.imageManager.GarbageCollect(); err != nil {
glog.Errorf("Image garbage collection failed: %v", err)
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.ImageGCFailed, err.Error())
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.ImageGCFailed, err.Error())
loggedImageGCFailure = true
} else {
var vLevel glog.Level = 4
@ -1223,7 +1224,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
glog.Warning("No api server defined - no node status update will be sent.")
}
if err := kl.initializeModules(); err != nil {
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.KubeletSetupFailed, err.Error())
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
glog.Error(err)
kl.runtimeState.setInitError(err)
}
@ -1265,7 +1266,7 @@ func (kl *Kubelet) GetKubeClient() clientset.Interface {
// GetClusterDNS returns a list of the DNS servers and a list of the DNS search
// domains of the cluster.
func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
func (kl *Kubelet) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) {
var hostDNS, hostSearch []string
// Get host DNS settings
if kl.resolverConfig != "" {
@ -1280,13 +1281,13 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
return nil, nil, err
}
}
useClusterFirstPolicy := pod.Spec.DNSPolicy == api.DNSClusterFirst
useClusterFirstPolicy := pod.Spec.DNSPolicy == v1.DNSClusterFirst
if useClusterFirstPolicy && kl.clusterDNS == nil {
// clusterDNS is not known.
// pod with ClusterDNSFirst Policy cannot be created
kl.recorder.Eventf(pod, api.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy)
kl.recorder.Eventf(pod, v1.EventTypeWarning, "MissingClusterDNS", "kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy)
log := fmt.Sprintf("kubelet does not have ClusterDNS IP configured and cannot create Pod using %q policy. pod: %q. Falling back to DNSDefault policy.", pod.Spec.DNSPolicy, format.Pod(pod))
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, "MissingClusterDNS", log)
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, "MissingClusterDNS", log)
// fallback to DNSDefault
useClusterFirstPolicy = false
@ -1331,7 +1332,7 @@ func (kl *Kubelet) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
//
// The workflow is:
// * If the pod is being created, record pod worker start latency
// * Call generateAPIPodStatus to prepare an api.PodStatus for the pod
// * Call generateAPIPodStatus to prepare a v1.PodStatus for the pod
// * If the pod is being seen as running for the first time, record pod
// start latency
// * Update the status of the pod in the status manager
@ -1398,7 +1399,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
// Record the time it takes for the pod to become running.
existingStatus, ok := kl.statusManager.GetPodStatus(pod.UID)
if !ok || existingStatus.Phase == api.PodPending && apiPodStatus.Phase == api.PodRunning &&
if !ok || existingStatus.Phase == v1.PodPending && apiPodStatus.Phase == v1.PodRunning &&
!firstSeenTime.IsZero() {
metrics.PodStartLatency.Observe(metrics.SinceInMicroseconds(firstSeenTime))
}
@ -1426,7 +1427,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
kl.statusManager.SetPodStatus(pod, apiPodStatus)
// Kill pod if it should not be running
if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == api.PodFailed {
if !runnable.Admit || pod.DeletionTimestamp != nil || apiPodStatus.Phase == v1.PodFailed {
var syncErr error
if err := kl.killPod(pod, nil, podStatus, nil); err != nil {
syncErr = fmt.Errorf("error killing pod: %v", err)
@ -1478,7 +1479,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
// expected to run only once and if the kubelet is restarted then
// they are not expected to run again.
// We don't create and apply updates to cgroup if its a run once pod and was killed above
if !(podKilled && pod.Spec.RestartPolicy == api.RestartPolicyNever) {
if !(podKilled && pod.Spec.RestartPolicy == v1.RestartPolicyNever) {
if err := pcm.EnsureExists(pod); err != nil {
return fmt.Errorf("failed to ensure that the pod: %v cgroups exist and are correctly applied: %v", pod.UID, err)
}
@ -1517,7 +1518,7 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
// Wait for volumes to attach/mount
if err := kl.volumeManager.WaitForAttachAndMount(pod); err != nil {
kl.recorder.Eventf(pod, api.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
kl.recorder.Eventf(pod, v1.EventTypeWarning, events.FailedMountVolume, "Unable to mount volumes for pod %q: %v", format.Pod(pod), err)
glog.Errorf("Unable to mount volumes for pod %q: %v; skipping pod", format.Pod(pod), err)
return err
}
@ -1548,13 +1549,13 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
}
if egress != nil || ingress != nil {
if podUsesHostNetwork(pod) {
kl.recorder.Event(pod, api.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
kl.recorder.Event(pod, v1.EventTypeWarning, events.HostNetworkNotSupported, "Bandwidth shaping is not currently supported on the host network")
} else if kl.shaper != nil {
if len(apiPodStatus.PodIP) > 0 {
err = kl.shaper.ReconcileCIDR(fmt.Sprintf("%s/32", apiPodStatus.PodIP), egress, ingress)
}
} else {
kl.recorder.Event(pod, api.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
kl.recorder.Event(pod, v1.EventTypeWarning, events.UndefinedShaper, "Pod requests bandwidth shaping, but the shaper is undefined")
}
}
@ -1564,14 +1565,14 @@ func (kl *Kubelet) syncPod(o syncPodOptions) error {
// Get pods which should be resynchronized. Currently, the following pods should be resynchronized:
// * pods whose work is ready.
// * pods for which internal modules have requested a sync.
func (kl *Kubelet) getPodsToSync() []*api.Pod {
func (kl *Kubelet) getPodsToSync() []*v1.Pod {
allPods := kl.podManager.GetPods()
podUIDs := kl.workQueue.GetWork()
podUIDSet := sets.NewString()
for _, podUID := range podUIDs {
podUIDSet.Insert(string(podUID))
}
var podsToSync []*api.Pod
var podsToSync []*v1.Pod
for _, pod := range allPods {
if podUIDSet.Has(string(pod.UID)) {
// The work of the pod is ready
@ -1594,7 +1595,7 @@ func (kl *Kubelet) getPodsToSync() []*api.Pod {
//
// deletePod returns an error if not all sources are ready or the pod is not
// found in the runtime cache.
func (kl *Kubelet) deletePod(pod *api.Pod) error {
func (kl *Kubelet) deletePod(pod *v1.Pod) error {
if pod == nil {
return fmt.Errorf("deletePod does not allow nil pod")
}
@ -1647,10 +1648,10 @@ func (kl *Kubelet) isOutOfDisk() bool {
// rejectPod records an event about the pod with the given reason and message,
// and updates the pod to the failed phase in the status manager.
func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
kl.recorder.Eventf(pod, api.EventTypeWarning, reason, message)
kl.statusManager.SetPodStatus(pod, api.PodStatus{
Phase: api.PodFailed,
func (kl *Kubelet) rejectPod(pod *v1.Pod, reason, message string) {
kl.recorder.Eventf(pod, v1.EventTypeWarning, reason, message)
kl.statusManager.SetPodStatus(pod, v1.PodStatus{
Phase: v1.PodFailed,
Reason: reason,
Message: "Pod " + message})
}
@ -1660,7 +1661,7 @@ func (kl *Kubelet) rejectPod(pod *api.Pod, reason, message string) {
// The function returns a boolean value indicating whether the pod
// can be admitted, a brief single-word reason and a message explaining why
// the pod cannot be admitted.
func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, string) {
func (kl *Kubelet) canAdmitPod(pods []*v1.Pod, pod *v1.Pod) (bool, string, string) {
// the kubelet will invoke each pod admit handler in sequence
// if any handler rejects, the pod is rejected.
// TODO: move out of disk check into a pod admitter
@ -1681,7 +1682,7 @@ func (kl *Kubelet) canAdmitPod(pods []*api.Pod, pod *api.Pod) (bool, string, str
return true, "", ""
}
func (kl *Kubelet) canRunPod(pod *api.Pod) lifecycle.PodAdmitResult {
func (kl *Kubelet) canRunPod(pod *v1.Pod) lifecycle.PodAdmitResult {
attrs := &lifecycle.PodAdmitAttributes{Pod: pod}
// Get "OtherPods". Rejected pods are failed, so only include admitted pods that are alive.
attrs.OtherPods = kl.filterOutTerminatedPods(kl.podManager.GetPods())
@ -1813,7 +1814,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
// PLEG event for a pod; sync it.
if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {
glog.V(2).Infof("SyncLoop (PLEG): %q, event: %#v", format.Pod(pod), e)
handler.HandlePodSyncs([]*api.Pod{pod})
handler.HandlePodSyncs([]*v1.Pod{pod})
} else {
// If the pod no longer exists, ignore the event.
glog.V(4).Infof("SyncLoop (PLEG): ignore irrelevant event: %#v", e)
@ -1846,7 +1847,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
break
}
glog.V(1).Infof("SyncLoop (container unhealthy): %q", format.Pod(pod))
handler.HandlePodSyncs([]*api.Pod{pod})
handler.HandlePodSyncs([]*v1.Pod{pod})
}
case <-housekeepingCh:
if !kl.sourcesReady.AllReady() {
@ -1866,7 +1867,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
// dispatchWork starts the asynchronous sync of the pod in a pod worker.
// If the pod is terminated, dispatchWork does not hand it to a pod worker.
func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubetypes.SyncPodType, mirrorPod *api.Pod, start time.Time) {
func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mirrorPod *v1.Pod, start time.Time) {
if kl.podIsTerminated(pod) {
if pod.DeletionTimestamp != nil {
// If the pod is in a terminated state, there is no pod worker to
@ -1895,7 +1896,7 @@ func (kl *Kubelet) dispatchWork(pod *api.Pod, syncType kubetypes.SyncPodType, mi
}
// TODO: handle mirror pods in a separate component (issue #17251)
func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) {
func (kl *Kubelet) handleMirrorPod(mirrorPod *v1.Pod, start time.Time) {
// Mirror pod ADD/UPDATE/DELETE operations are considered an UPDATE to the
// corresponding static pod. Send update to the pod worker if the static
// pod exists.
@ -1906,7 +1907,7 @@ func (kl *Kubelet) handleMirrorPod(mirrorPod *api.Pod, start time.Time) {
// HandlePodAdditions is the callback in SyncHandler for pods being added from
// a config source.
func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) {
func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
start := kl.clock.Now()
sort.Sort(sliceutils.PodsByCreationTime(pods))
for _, pod := range pods {
@ -1934,7 +1935,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) {
// HandlePodUpdates is the callback in the SyncHandler interface for pods
// being updated from a config source.
func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) {
func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
kl.podManager.UpdatePod(pod)
@ -1951,7 +1952,7 @@ func (kl *Kubelet) HandlePodUpdates(pods []*api.Pod) {
// HandlePodRemoves is the callback in the SyncHandler interface for pods
// being removed from a config source.
func (kl *Kubelet) HandlePodRemoves(pods []*api.Pod) {
func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
kl.podManager.DeletePod(pod)
@ -1970,7 +1971,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*api.Pod) {
// HandlePodReconcile is the callback in the SyncHandler interface for pods
// that should be reconciled.
func (kl *Kubelet) HandlePodReconcile(pods []*api.Pod) {
func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
for _, pod := range pods {
// Update the pod in pod manager, status manager will do periodically reconcile according
// to the pod manager.
@ -1987,7 +1988,7 @@ func (kl *Kubelet) HandlePodReconcile(pods []*api.Pod) {
// HandlePodSyncs is the callback in the syncHandler interface for pods
// that should be dispatched to pod workers for sync.
func (kl *Kubelet) HandlePodSyncs(pods []*api.Pod) {
func (kl *Kubelet) HandlePodSyncs(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
@ -2054,7 +2055,7 @@ func (kl *Kubelet) updateRuntimeUp() {
// updateCloudProviderFromMachineInfo updates the node's provider ID field
// from the given cadvisor machine info.
func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *api.Node, info *cadvisorapi.MachineInfo) {
func (kl *Kubelet) updateCloudProviderFromMachineInfo(node *v1.Node, info *cadvisorapi.MachineInfo) {
if info.CloudProvider != cadvisorapi.UnknownProvider &&
info.CloudProvider != cadvisorapi.Baremetal {
// The cloud providers from pkg/cloudprovider/providers/* that update ProviderID
@ -2074,7 +2075,7 @@ func (kl *Kubelet) GetConfiguration() componentconfig.KubeletConfiguration {
// BirthCry sends an event that the kubelet has started up.
func (kl *Kubelet) BirthCry() {
// Make an event that kubelet restarted.
kl.recorder.Eventf(kl.nodeRef, api.EventTypeNormal, events.StartingKubelet, "Starting kubelet.")
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeNormal, events.StartingKubelet, "Starting kubelet.")
}
// StreamingConnectionIdleTimeout returns the timeout for streaming connections to the HTTP server.
@ -2117,12 +2118,12 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
// parseResourceList parses the given configuration map into an API
// ResourceList or returns an error.
func parseResourceList(m utilconfig.ConfigurationMap) (api.ResourceList, error) {
rl := make(api.ResourceList)
func parseResourceList(m utilconfig.ConfigurationMap) (v1.ResourceList, error) {
rl := make(v1.ResourceList)
for k, v := range m {
switch api.ResourceName(k) {
switch v1.ResourceName(k) {
// Only CPU and memory resources are supported.
case api.ResourceCPU, api.ResourceMemory:
case v1.ResourceCPU, v1.ResourceMemory:
q, err := resource.ParseQuantity(v)
if err != nil {
return nil, err
@ -2130,7 +2131,7 @@ func parseResourceList(m utilconfig.ConfigurationMap) (api.ResourceList, error)
if q.Sign() == -1 {
return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
}
rl[api.ResourceName(k)] = q
rl[v1.ResourceName(k)] = q
default:
return nil, fmt.Errorf("cannot reserve %q resource", k)
}
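// Illustration (not part of this commit), with hypothetical reservation values; ConfigurationMap
// is the string-to-string map used for the kubelet's reservation flags:
//
//	reserved, err := parseResourceList(utilconfig.ConfigurationMap{"cpu": "500m", "memory": "1Gi"})
//	if err != nil {
//		// a negative quantity or an unsupported resource key lands here
//	}
//	cpu, mem := reserved[v1.ResourceCPU], reserved[v1.ResourceMemory] // resource.Quantity values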

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
@ -142,21 +142,21 @@ func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
// GetPods returns all pods bound to the kubelet and their spec, and the mirror
// pods.
func (kl *Kubelet) GetPods() []*api.Pod {
func (kl *Kubelet) GetPods() []*v1.Pod {
return kl.podManager.GetPods()
}
// GetRunningPods returns all pods running on the kubelet, by looking at the
// container runtime cache. This function converts kubecontainer.Pod to
// api.Pod, so only the fields that exist in both kubecontainer.Pod and
// api.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*api.Pod, error) {
// v1.Pod, so only the fields that exist in both kubecontainer.Pod and
// v1.Pod are considered meaningful.
func (kl *Kubelet) GetRunningPods() ([]*v1.Pod, error) {
pods, err := kl.runtimeCache.GetPods()
if err != nil {
return nil, err
}
apiPods := make([]*api.Pod, 0, len(pods))
apiPods := make([]*v1.Pod, 0, len(pods))
for _, pod := range pods {
apiPods = append(apiPods, pod.ToAPIPod())
}
@ -165,13 +165,13 @@ func (kl *Kubelet) GetRunningPods() ([]*api.Pod, error) {
// GetPodByFullName gets the pod with the given 'full' name, which
// incorporates the namespace, and returns whether the pod was found.
func (kl *Kubelet) GetPodByFullName(podFullName string) (*api.Pod, bool) {
func (kl *Kubelet) GetPodByFullName(podFullName string) (*v1.Pod, bool) {
return kl.podManager.GetPodByFullName(podFullName)
}
// GetPodByName provides the first pod that matches namespace and name, as well
// as whether the pod was found.
func (kl *Kubelet) GetPodByName(namespace, name string) (*api.Pod, bool) {
func (kl *Kubelet) GetPodByName(namespace, name string) (*v1.Pod, bool) {
return kl.podManager.GetPodByName(namespace, name)
}
@ -187,19 +187,19 @@ func (kl *Kubelet) GetRuntime() kubecontainer.Runtime {
}
// GetNode returns the node info for the configured node name of this Kubelet.
func (kl *Kubelet) GetNode() (*api.Node, error) {
func (kl *Kubelet) GetNode() (*v1.Node, error) {
if kl.standaloneMode {
return kl.initialNode()
}
return kl.nodeInfo.GetNodeInfo(string(kl.nodeName))
}
// getNodeAnyWay() must return a *api.Node which is required by RunGeneralPredicates().
// The *api.Node is obtained as follows:
// getNodeAnyWay() must return a *v1.Node which is required by RunGeneralPredicates().
// The *v1.Node is obtained as follows:
// Return kubelet's nodeInfo for this node, except on error or if in standalone mode,
// in which case return a manufactured nodeInfo representing a node with no pods,
// zero capacity, and the default labels.
func (kl *Kubelet) getNodeAnyWay() (*api.Node, error) {
func (kl *Kubelet) getNodeAnyWay() (*v1.Node, error) {
if !kl.standaloneMode {
if n, err := kl.nodeInfo.GetNodeInfo(string(kl.nodeName)); err == nil {
return n, nil
@ -235,7 +235,7 @@ func (kl *Kubelet) getHostIPAnyWay() (net.IP, error) {
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 {
func (kl *Kubelet) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return kl.volumeManager.GetExtraSupplementalGroupsForPod(pod)
}

View File

@ -23,7 +23,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/kubelet/network"
"k8s.io/kubernetes/pkg/util/bandwidth"
@ -146,7 +146,7 @@ func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []s
// cleanupBandwidthLimits updates the status of bandwidth-limited containers
// and ensures that only the appropriate CIDRs are active on the node.
func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
func (kl *Kubelet) cleanupBandwidthLimits(allPods []*v1.Pod) error {
if kl.shaper == nil {
return nil
}
@ -174,7 +174,7 @@ func (kl *Kubelet) cleanupBandwidthLimits(allPods []*api.Pod) error {
}
status = kl.generateAPIPodStatus(pod, s)
}
if status.Phase == api.PodRunning {
if status.Phase == v1.PodRunning {
possibleCIDRs.Insert(fmt.Sprintf("%s/32", status.PodIP))
}
}

View File

@ -22,7 +22,7 @@ import (
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/bandwidth"
)
@ -137,7 +137,7 @@ func TestParseResolvConf(t *testing.T) {
}
func TestCleanupBandwidthLimits(t *testing.T) {
testPod := func(name, ingress string) *api.Pod {
testPod := func(name, ingress string) *v1.Pod {
pod := podWithUidNameNs("", name, "")
if len(ingress) != 0 {
@ -150,18 +150,18 @@ func TestCleanupBandwidthLimits(t *testing.T) {
// TODO(random-liu): We removed the test case for pod status not cached here. We should add a higher
// layer status getter function and test that function instead.
tests := []struct {
status *api.PodStatus
pods []*api.Pod
status *v1.PodStatus
pods []*v1.Pod
inputCIDRs []string
expectResetCIDRs []string
name string
}{
{
status: &api.PodStatus{
status: &v1.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodRunning,
Phase: v1.PodRunning,
},
pods: []*api.Pod{
pods: []*v1.Pod{
testPod("foo", "10M"),
testPod("bar", ""),
},
@ -170,11 +170,11 @@ func TestCleanupBandwidthLimits(t *testing.T) {
name: "pod running",
},
{
status: &api.PodStatus{
status: &v1.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
Phase: v1.PodFailed,
},
pods: []*api.Pod{
pods: []*v1.Pod{
testPod("foo", "10M"),
testPod("bar", ""),
},
@ -183,11 +183,11 @@ func TestCleanupBandwidthLimits(t *testing.T) {
name: "pod not running",
},
{
status: &api.PodStatus{
status: &v1.PodStatus{
PodIP: "1.2.3.4",
Phase: api.PodFailed,
Phase: v1.PodFailed,
},
pods: []*api.Pod{
pods: []*v1.Pod{
testPod("foo", ""),
testPod("bar", ""),
},

View File

@ -26,10 +26,10 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
@ -67,7 +67,7 @@ func (kl *Kubelet) registerWithApiServer() {
node, err := kl.initialNode()
if err != nil {
glog.Errorf("Unable to construct api.Node object for kubelet: %v", err)
glog.Errorf("Unable to construct v1.Node object for kubelet: %v", err)
continue
}
@ -88,7 +88,7 @@ func (kl *Kubelet) registerWithApiServer() {
// persistent volumes for the node. If a node of the same name exists but has
// a different externalID value, it attempts to delete that node so that a
// later attempt can recreate it.
func (kl *Kubelet) tryRegisterWithApiServer(node *api.Node) bool {
func (kl *Kubelet) tryRegisterWithApiServer(node *v1.Node) bool {
_, err := kl.kubeClient.Core().Nodes().Create(node)
if err == nil {
return true
@ -142,7 +142,7 @@ func (kl *Kubelet) tryRegisterWithApiServer(node *api.Node) bool {
// reconcileCMADAnnotationWithExistingNode reconciles the controller-managed
// attach-detach annotation on a new node and the existing node, returning
// whether the existing node must be updated.
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *api.Node) bool {
func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *v1.Node) bool {
var (
existingCMAAnnotation = existingNode.Annotations[volumehelper.ControllerManagedAttachAnnotation]
newCMAAnnotation, newSet = node.Annotations[volumehelper.ControllerManagedAttachAnnotation]
@ -169,11 +169,11 @@ func (kl *Kubelet) reconcileCMADAnnotationWithExistingNode(node, existingNode *a
return true
}
// initialNode constructs the initial api.Node for this Kubelet, incorporating node
// initialNode constructs the initial v1.Node for this Kubelet, incorporating node
// labels, information from the cloud provider, and Kubelet configuration.
func (kl *Kubelet) initialNode() (*api.Node, error) {
node := &api.Node{
ObjectMeta: api.ObjectMeta{
func (kl *Kubelet) initialNode() (*v1.Node, error) {
node := &v1.Node{
ObjectMeta: v1.ObjectMeta{
Name: string(kl.nodeName),
Labels: map[string]string{
unversioned.LabelHostname: kl.hostname,
@ -181,15 +181,15 @@ func (kl *Kubelet) initialNode() (*api.Node, error) {
unversioned.LabelArch: goRuntime.GOARCH,
},
},
Spec: api.NodeSpec{
Spec: v1.NodeSpec{
Unschedulable: !kl.registerSchedulable,
},
}
// Initially, set NodeNetworkUnavailable to true.
if kl.providerRequiresNetworkingConfiguration() {
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
Type: api.NodeNetworkUnavailable,
Status: api.ConditionTrue,
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
Reason: "NoRouteCreated",
Message: "Node created without a route",
LastTransitionTime: unversioned.NewTime(kl.clock.Now()),
@ -320,8 +320,8 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
// field selector for the name of the node (field selectors with
// specified name are handled efficiently by apiserver). Once
// apiserver supports GET from cache, change it here.
opts := api.ListOptions{
FieldSelector: fields.Set{"metadata.name": string(kl.nodeName)}.AsSelector(),
opts := v1.ListOptions{
FieldSelector: fields.Set{"metadata.name": string(kl.nodeName)}.AsSelector().String(),
ResourceVersion: "0",
}
nodes, err := kl.kubeClient.Core().Nodes().List(opts)
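This hunk is a good illustration of the "list my own object" idiom: filter the LIST by metadata.name so the apiserver can serve it efficiently, and pass ResourceVersion "0" to allow a cached read. A hedged sketch of a helper built on the same calls follows; it assumes the release_1_5 clientset and pkg/fields packages already used in this commit, and the getOwnNode name is invented for illustration.

// Illustrative sketch — not part of this commit.
package nodestatus

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
)

// getOwnNode fetches a single Node by listing with a metadata.name field
// selector, mirroring the options built in tryUpdateNodeStatus above.
func getOwnNode(c clientset.Interface, nodeName string) (*v1.Node, error) {
	opts := v1.ListOptions{
		FieldSelector:   fields.Set{"metadata.name": nodeName}.AsSelector().String(),
		ResourceVersion: "0", // a read served from the apiserver cache is acceptable here
	}
	nodes, err := c.Core().Nodes().List(opts)
	if err != nil {
		return nil, err
	}
	if len(nodes.Items) != 1 {
		return nil, fmt.Errorf("expected one node %q, got %d", nodeName, len(nodes.Items))
	}
	return &nodes.Items[0], nil
}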
@ -359,7 +359,7 @@ func (kl *Kubelet) recordNodeStatusEvent(eventtype, event string) {
}
// Set IP and hostname addresses for the node.
func (kl *Kubelet) setNodeAddress(node *api.Node) error {
func (kl *Kubelet) setNodeAddress(node *v1.Node) error {
if kl.nodeIP != nil {
if err := kl.validateNodeIP(); err != nil {
return fmt.Errorf("failed to validate nodeIP: %v", err)
@ -383,9 +383,9 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error {
if kl.nodeIP != nil {
for _, nodeAddress := range nodeAddresses {
if nodeAddress.Address == kl.nodeIP.String() {
node.Status.Addresses = []api.NodeAddress{
node.Status.Addresses = []v1.NodeAddress{
{Type: nodeAddress.Type, Address: nodeAddress.Address},
{Type: api.NodeHostName, Address: kl.GetHostname()},
{Type: v1.NodeHostName, Address: kl.GetHostname()},
}
return nil
}
@ -395,15 +395,15 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error {
// Only add a NodeHostName address if the cloudprovider did not specify one
// (we assume the cloudprovider knows best)
var addressNodeHostName *api.NodeAddress
var addressNodeHostName *v1.NodeAddress
for i := range nodeAddresses {
if nodeAddresses[i].Type == api.NodeHostName {
if nodeAddresses[i].Type == v1.NodeHostName {
addressNodeHostName = &nodeAddresses[i]
break
}
}
if addressNodeHostName == nil {
hostnameAddress := api.NodeAddress{Type: api.NodeHostName, Address: kl.GetHostname()}
hostnameAddress := v1.NodeAddress{Type: v1.NodeHostName, Address: kl.GetHostname()}
nodeAddresses = append(nodeAddresses, hostnameAddress)
} else {
glog.V(2).Infof("Using Node Hostname from cloudprovider: %q", addressNodeHostName.Address)
@ -440,21 +440,21 @@ func (kl *Kubelet) setNodeAddress(node *api.Node) error {
// We tried everything we could, but the IP address wasn't fetchable; error out
return fmt.Errorf("can't get ip address of node %s. error: %v", node.Name, err)
} else {
node.Status.Addresses = []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: ipAddr.String()},
{Type: api.NodeInternalIP, Address: ipAddr.String()},
{Type: api.NodeHostName, Address: kl.GetHostname()},
node.Status.Addresses = []v1.NodeAddress{
{Type: v1.NodeLegacyHostIP, Address: ipAddr.String()},
{Type: v1.NodeInternalIP, Address: ipAddr.String()},
{Type: v1.NodeHostName, Address: kl.GetHostname()},
}
}
}
return nil
}
func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
func (kl *Kubelet) setNodeStatusMachineInfo(node *v1.Node) {
// Note: avoid blindly overwriting the capacity in case opaque
// resources are being advertised.
if node.Status.Capacity == nil {
node.Status.Capacity = api.ResourceList{}
node.Status.Capacity = v1.ResourceList{}
}
// TODO: Post NotReady if we cannot get MachineInfo from cAdvisor. This needs to start
@ -463,10 +463,10 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
if err != nil {
// TODO(roberthbailey): This is required for test-cmd.sh to pass.
// See if the test should be updated instead.
node.Status.Capacity[api.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
node.Status.Capacity[api.ResourceMemory] = resource.MustParse("0Gi")
node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI)
node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI)
node.Status.Capacity[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
node.Status.Capacity[v1.ResourceMemory] = resource.MustParse("0Gi")
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(int64(kl.maxPods), resource.DecimalSI)
node.Status.Capacity[v1.ResourceNvidiaGPU] = *resource.NewQuantity(int64(kl.nvidiaGPUs), resource.DecimalSI)
glog.Errorf("Error getting machine info: %v", err)
} else {
@ -478,26 +478,26 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
}
if kl.podsPerCore > 0 {
node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(math.Min(float64(info.NumCores*kl.podsPerCore), float64(kl.maxPods))), resource.DecimalSI)
} else {
node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(
node.Status.Capacity[v1.ResourcePods] = *resource.NewQuantity(
int64(kl.maxPods), resource.DecimalSI)
}
node.Status.Capacity[api.ResourceNvidiaGPU] = *resource.NewQuantity(
node.Status.Capacity[v1.ResourceNvidiaGPU] = *resource.NewQuantity(
int64(kl.nvidiaGPUs), resource.DecimalSI)
if node.Status.NodeInfo.BootID != "" &&
node.Status.NodeInfo.BootID != info.BootID {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, api.EventTypeWarning, events.NodeRebooted,
kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.NodeRebooted,
"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
}
node.Status.NodeInfo.BootID = info.BootID
}
// Set Allocatable.
node.Status.Allocatable = make(api.ResourceList)
node.Status.Allocatable = make(v1.ResourceList)
for k, v := range node.Status.Capacity {
value := *(v.Copy())
if kl.reservation.System != nil {
@ -515,7 +515,7 @@ func (kl *Kubelet) setNodeStatusMachineInfo(node *api.Node) {
}
// Set version info for the node.
func (kl *Kubelet) setNodeStatusVersionInfo(node *api.Node) {
func (kl *Kubelet) setNodeStatusVersionInfo(node *v1.Node) {
verinfo, err := kl.cadvisor.VersionInfo()
if err != nil {
glog.Errorf("Error getting version info: %v", err)
@ -537,14 +537,14 @@ func (kl *Kubelet) setNodeStatusVersionInfo(node *api.Node) {
}
// Set daemonEndpoints for the node.
func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *api.Node) {
func (kl *Kubelet) setNodeStatusDaemonEndpoints(node *v1.Node) {
node.Status.DaemonEndpoints = *kl.daemonEndpoints
}
// Set the image list for the node.
func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
func (kl *Kubelet) setNodeStatusImages(node *v1.Node) {
// Update image list of this node
var imagesOnNode []api.ContainerImage
var imagesOnNode []v1.ContainerImage
containerImages, err := kl.imageManager.GetImageList()
if err != nil {
glog.Errorf("Error getting image list: %v", err)
@ -561,7 +561,7 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
if len(names) > maxNamesPerImageInNodeStatus {
names = names[0:maxNamesPerImageInNodeStatus]
}
imagesOnNode = append(imagesOnNode, api.ContainerImage{
imagesOnNode = append(imagesOnNode, v1.ContainerImage{
Names: names,
SizeBytes: image.Size,
})
@ -571,13 +571,13 @@ func (kl *Kubelet) setNodeStatusImages(node *api.Node) {
}
// Set the GOOS and GOARCH for this node
func (kl *Kubelet) setNodeStatusGoRuntime(node *api.Node) {
func (kl *Kubelet) setNodeStatusGoRuntime(node *v1.Node) {
node.Status.NodeInfo.OperatingSystem = goRuntime.GOOS
node.Status.NodeInfo.Architecture = goRuntime.GOARCH
}
// Set status for the node.
func (kl *Kubelet) setNodeStatusInfo(node *api.Node) {
func (kl *Kubelet) setNodeStatusInfo(node *v1.Node) {
kl.setNodeStatusMachineInfo(node)
kl.setNodeStatusVersionInfo(node)
kl.setNodeStatusDaemonEndpoints(node)
@ -586,25 +586,25 @@ func (kl *Kubelet) setNodeStatusInfo(node *api.Node) {
}
// Set Ready condition for the node.
func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
func (kl *Kubelet) setNodeReadyCondition(node *v1.Node) {
// NOTE(aaronlevy): NodeReady condition needs to be the last in the list of node conditions.
// This is due to an issue with version skewed kubelet and master components.
// ref: https://github.com/kubernetes/kubernetes/issues/16961
currentTime := unversioned.NewTime(kl.clock.Now())
var newNodeReadyCondition api.NodeCondition
var newNodeReadyCondition v1.NodeCondition
rs := append(kl.runtimeState.runtimeErrors(), kl.runtimeState.networkErrors()...)
if len(rs) == 0 {
newNodeReadyCondition = api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionTrue,
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: "kubelet is posting ready status",
LastHeartbeatTime: currentTime,
}
} else {
newNodeReadyCondition = api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionFalse,
newNodeReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
Reason: "KubeletNotReady",
Message: strings.Join(rs, ","),
LastHeartbeatTime: currentTime,
@ -613,7 +613,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
// Append AppArmor status if it's enabled.
// TODO(timstclair): This is a temporary message until node feature reporting is added.
if newNodeReadyCondition.Status == api.ConditionTrue &&
if newNodeReadyCondition.Status == v1.ConditionTrue &&
kl.appArmorValidator != nil && kl.appArmorValidator.ValidateHost() == nil {
newNodeReadyCondition.Message = fmt.Sprintf("%s. AppArmor enabled", newNodeReadyCondition.Message)
}
@ -627,7 +627,7 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
readyConditionUpdated := false
needToRecordEvent := false
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == api.NodeReady {
if node.Status.Conditions[i].Type == v1.NodeReady {
if node.Status.Conditions[i].Status == newNodeReadyCondition.Status {
newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
} else {
@ -644,23 +644,23 @@ func (kl *Kubelet) setNodeReadyCondition(node *api.Node) {
node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
}
if needToRecordEvent {
if newNodeReadyCondition.Status == api.ConditionTrue {
kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeReady)
if newNodeReadyCondition.Status == v1.ConditionTrue {
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeReady)
} else {
kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotReady)
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotReady)
}
}
}
// setNodeMemoryPressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) {
func (kl *Kubelet) setNodeMemoryPressureCondition(node *v1.Node) {
currentTime := unversioned.NewTime(kl.clock.Now())
var condition *api.NodeCondition
var condition *v1.NodeCondition
// Check if NodeMemoryPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == api.NodeMemoryPressure {
if node.Status.Conditions[i].Type == v1.NodeMemoryPressure {
condition = &node.Status.Conditions[i]
}
}
@ -668,9 +668,9 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) {
newCondition := false
// If the NodeMemoryPressure condition doesn't exist, create one
if condition == nil {
condition = &api.NodeCondition{
Type: api.NodeMemoryPressure,
Status: api.ConditionUnknown,
condition = &v1.NodeCondition{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionUnknown,
}
// cannot be appended to node.Status.Conditions here because it gets
// copied to the slice. So if we append to the slice here none of the
@ -683,25 +683,25 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) {
// Note: The conditions below take care of the case when a new NodeMemoryPressure condition is
// created as well as the case when the condition already exists. When a new condition
// is created its status is set to api.ConditionUnknown which matches either
// condition.Status != api.ConditionTrue or
// condition.Status != api.ConditionFalse in the conditions below depending on whether
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under memory pressure or not.
if kl.evictionManager.IsUnderMemoryPressure() {
if condition.Status != api.ConditionTrue {
condition.Status = api.ConditionTrue
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasInsufficientMemory"
condition.Message = "kubelet has insufficient memory available"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInsufficientMemory")
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasInsufficientMemory")
}
} else {
if condition.Status != api.ConditionFalse {
condition.Status = api.ConditionFalse
if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasSufficientMemory"
condition.Message = "kubelet has sufficient memory available"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientMemory")
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientMemory")
}
}
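The memory-pressure, disk-pressure, and out-of-disk setters in this file all share one shape: find the condition by type, create it with an Unknown status if missing, and only stamp a new transition time and record an event when the status actually flips. The sketch below captures that shape with simplified stand-ins for v1.NodeCondition and the event recorder; the reason, message, and event strings are placeholders, not the exact ones the kubelet uses.

// Illustrative sketch — not part of this commit.
package main

import (
	"fmt"
	"time"
)

type conditionStatus string

const (
	conditionTrue    conditionStatus = "True"
	conditionFalse   conditionStatus = "False"
	conditionUnknown conditionStatus = "Unknown"
)

// nodeCondition is a simplified stand-in for v1.NodeCondition.
type nodeCondition struct {
	Type               string
	Status             conditionStatus
	Reason             string
	Message            string
	LastTransitionTime time.Time
}

// setPressureCondition finds (or creates) the condition of the given type and
// flips it based on underPressure, stamping the transition time and recording
// an event only when the status actually changes.
func setPressureCondition(conds []nodeCondition, condType string, underPressure bool, now time.Time, recordEvent func(string)) []nodeCondition {
	idx := -1
	for i := range conds {
		if conds[i].Type == condType {
			idx = i
			break
		}
	}
	if idx == -1 {
		// New conditions start out Unknown so either branch below fires once.
		conds = append(conds, nodeCondition{Type: condType, Status: conditionUnknown})
		idx = len(conds) - 1
	}
	cond := &conds[idx]
	switch {
	case underPressure && cond.Status != conditionTrue:
		cond.Status, cond.Reason = conditionTrue, "KubeletHasInsufficientResource"
		cond.Message = "kubelet is under pressure for this resource"
		cond.LastTransitionTime = now
		recordEvent("NodeUnderPressure")
	case !underPressure && cond.Status != conditionFalse:
		cond.Status, cond.Reason = conditionFalse, "KubeletHasSufficientResource"
		cond.Message = "kubelet has enough of this resource"
		cond.LastTransitionTime = now
		recordEvent("NodeNotUnderPressure")
	}
	return conds
}

func main() {
	record := func(e string) { fmt.Println("event:", e) }
	conds := setPressureCondition(nil, "MemoryPressure", true, time.Now(), record)
	conds = setPressureCondition(conds, "MemoryPressure", true, time.Now(), record) // no new event
	fmt.Printf("%+v\n", conds[0])
}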
@ -712,13 +712,13 @@ func (kl *Kubelet) setNodeMemoryPressureCondition(node *api.Node) {
// setNodeDiskPressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) {
func (kl *Kubelet) setNodeDiskPressureCondition(node *v1.Node) {
currentTime := unversioned.NewTime(kl.clock.Now())
var condition *api.NodeCondition
var condition *v1.NodeCondition
// Check if NodeDiskPressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == api.NodeDiskPressure {
if node.Status.Conditions[i].Type == v1.NodeDiskPressure {
condition = &node.Status.Conditions[i]
}
}
@ -726,9 +726,9 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) {
newCondition := false
// If the NodeDiskPressure condition doesn't exist, create one
if condition == nil {
condition = &api.NodeCondition{
Type: api.NodeDiskPressure,
Status: api.ConditionUnknown,
condition = &v1.NodeCondition{
Type: v1.NodeDiskPressure,
Status: v1.ConditionUnknown,
}
// cannot be appended to node.Status.Conditions here because it gets
// copied to the slice. So if we append to the slice here none of the
@ -741,25 +741,25 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) {
// Note: The conditions below take care of the case when a new NodeDiskPressure condition is
// created as well as the case when the condition already exists. When a new condition
// is created its status is set to api.ConditionUnknown which matches either
// condition.Status != api.ConditionTrue or
// condition.Status != api.ConditionFalse in the conditions below depending on whether
// is created its status is set to v1.ConditionUnknown which matches either
// condition.Status != v1.ConditionTrue or
// condition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is under disk pressure or not.
if kl.evictionManager.IsUnderDiskPressure() {
if condition.Status != api.ConditionTrue {
condition.Status = api.ConditionTrue
if condition.Status != v1.ConditionTrue {
condition.Status = v1.ConditionTrue
condition.Reason = "KubeletHasDiskPressure"
condition.Message = "kubelet has disk pressure"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasDiskPressure")
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasDiskPressure")
}
} else {
if condition.Status != api.ConditionFalse {
condition.Status = api.ConditionFalse
if condition.Status != v1.ConditionFalse {
condition.Status = v1.ConditionFalse
condition.Reason = "KubeletHasNoDiskPressure"
condition.Message = "kubelet has no disk pressure"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasNoDiskPressure")
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasNoDiskPressure")
}
}
@ -769,13 +769,13 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) {
}
// Set the OOD condition for the node.
func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
func (kl *Kubelet) setNodeOODCondition(node *v1.Node) {
currentTime := unversioned.NewTime(kl.clock.Now())
var nodeOODCondition *api.NodeCondition
var nodeOODCondition *v1.NodeCondition
// Check if NodeOutOfDisk condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == api.NodeOutOfDisk {
if node.Status.Conditions[i].Type == v1.NodeOutOfDisk {
nodeOODCondition = &node.Status.Conditions[i]
}
}
@ -783,9 +783,9 @@ func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
newOODCondition := false
// If the NodeOutOfDisk condition doesn't exist, create one.
if nodeOODCondition == nil {
nodeOODCondition = &api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionUnknown,
nodeOODCondition = &v1.NodeCondition{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionUnknown,
}
// nodeOODCondition cannot be appended to node.Status.Conditions here because it gets
// copied to the slice. So if we append nodeOODCondition to the slice here none of the
@ -798,29 +798,29 @@ func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
// Note: The conditions below take care of the case when a new NodeOutOfDisk condition is
// created as well as the case when the condition already exists. When a new condition
// is created its status is set to api.ConditionUnknown which matches either
// nodeOODCondition.Status != api.ConditionTrue or
// nodeOODCondition.Status != api.ConditionFalse in the conditions below depending on whether
// is created its status is set to v1.ConditionUnknown which matches either
// nodeOODCondition.Status != v1.ConditionTrue or
// nodeOODCondition.Status != v1.ConditionFalse in the conditions below depending on whether
// the kubelet is out of disk or not.
if kl.isOutOfDisk() {
if nodeOODCondition.Status != api.ConditionTrue {
nodeOODCondition.Status = api.ConditionTrue
if nodeOODCondition.Status != v1.ConditionTrue {
nodeOODCondition.Status = v1.ConditionTrue
nodeOODCondition.Reason = "KubeletOutOfDisk"
nodeOODCondition.Message = "out of disk space"
nodeOODCondition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeOutOfDisk")
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeOutOfDisk")
}
} else {
if nodeOODCondition.Status != api.ConditionFalse {
if nodeOODCondition.Status != v1.ConditionFalse {
// Update the out of disk condition when the condition status is unknown even if we
// are within the outOfDiskTransitionFrequency duration. We do this to set the
// condition status correctly at kubelet startup.
if nodeOODCondition.Status == api.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency {
nodeOODCondition.Status = api.ConditionFalse
if nodeOODCondition.Status == v1.ConditionUnknown || kl.clock.Since(nodeOODCondition.LastTransitionTime.Time) >= kl.outOfDiskTransitionFrequency {
nodeOODCondition.Status = v1.ConditionFalse
nodeOODCondition.Reason = "KubeletHasSufficientDisk"
nodeOODCondition.Message = "kubelet has sufficient disk space available"
nodeOODCondition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasSufficientDisk")
kl.recordNodeStatusEvent(v1.EventTypeNormal, "NodeHasSufficientDisk")
} else {
glog.Infof("Node condition status for OutOfDisk is false, but last transition time is less than %s", kl.outOfDiskTransitionFrequency)
}
@ -837,12 +837,12 @@ func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
var oldNodeUnschedulable bool
// record an event whenever the node's schedulability changes.
func (kl *Kubelet) recordNodeSchedulableEvent(node *api.Node) {
func (kl *Kubelet) recordNodeSchedulableEvent(node *v1.Node) {
if oldNodeUnschedulable != node.Spec.Unschedulable {
if node.Spec.Unschedulable {
kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeNotSchedulable)
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeNotSchedulable)
} else {
kl.recordNodeStatusEvent(api.EventTypeNormal, events.NodeSchedulable)
kl.recordNodeStatusEvent(v1.EventTypeNormal, events.NodeSchedulable)
}
oldNodeUnschedulable = node.Spec.Unschedulable
}
@ -850,7 +850,7 @@ func (kl *Kubelet) recordNodeSchedulableEvent(node *api.Node) {
// Update VolumesInUse field in Node Status only after states are synced up at least once
// in the volume reconciler.
func (kl *Kubelet) setNodeVolumesInUseStatus(node *api.Node) {
func (kl *Kubelet) setNodeVolumesInUseStatus(node *v1.Node) {
// Make sure to only update node status after reconciler starts syncing up states
if kl.volumeManager.ReconcilerStatesHasBeenSynced() {
node.Status.VolumesInUse = kl.volumeManager.GetVolumesInUse()
@ -861,7 +861,7 @@ func (kl *Kubelet) setNodeVolumesInUseStatus(node *api.Node) {
// any fields that are currently set.
// TODO(madhusudancs): Simplify the logic for setting node conditions and
// refactor the node status condition code out to a different file.
func (kl *Kubelet) setNodeStatus(node *api.Node) error {
func (kl *Kubelet) setNodeStatus(node *v1.Node) error {
for _, f := range kl.setNodeStatusFuncs {
if err := f(node); err != nil {
return err
@ -872,15 +872,15 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
// defaultNodeStatusFuncs is a factory that generates the default set of
// setNodeStatus funcs
func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error {
func (kl *Kubelet) defaultNodeStatusFuncs() []func(*v1.Node) error {
// initial set of node status update handlers, can be modified by Options
withoutError := func(f func(*api.Node)) func(*api.Node) error {
return func(n *api.Node) error {
withoutError := func(f func(*v1.Node)) func(*v1.Node) error {
return func(n *v1.Node) error {
f(n)
return nil
}
}
return []func(*api.Node) error{
return []func(*v1.Node) error{
kl.setNodeAddress,
withoutError(kl.setNodeStatusInfo),
withoutError(kl.setNodeOODCondition),
@ -894,7 +894,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error {
// SetNodeStatus returns a functional Option that adds the given node status
// update handler to the Kubelet
func SetNodeStatus(f func(*api.Node) error) Option {
func SetNodeStatus(f func(*v1.Node) error) Option {
return func(k *Kubelet) {
k.setNodeStatusFuncs = append(k.setNodeStatusFuncs, f)
}
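defaultNodeStatusFuncs and SetNodeStatus above amount to a small functional-options setup: every status mutator is normalized to func(*v1.Node) error, infallible setters are wrapped by withoutError, and extra handlers are appended through an Option. Here is a self-contained sketch of that wiring with toy types standing in for the kubelet and v1.Node.

// Illustrative sketch — not part of this commit.
package main

import "fmt"

// node is a simplified stand-in for *v1.Node.
type node struct{ labels map[string]string }

// kubelet holds the chain of status-update funcs, mirroring setNodeStatusFuncs.
type kubelet struct{ setNodeStatusFuncs []func(*node) error }

// option mirrors the kubelet's functional Option type.
type option func(*kubelet)

// withoutError adapts an infallible setter to the error-returning signature
// used by the chain, the same trick used in defaultNodeStatusFuncs.
func withoutError(f func(*node)) func(*node) error {
	return func(n *node) error {
		f(n)
		return nil
	}
}

// setNodeStatus is the functional option that appends another handler.
func setNodeStatus(f func(*node) error) option {
	return func(k *kubelet) { k.setNodeStatusFuncs = append(k.setNodeStatusFuncs, f) }
}

// runStatusFuncs applies every registered handler, stopping on the first error.
func (k *kubelet) runStatusFuncs(n *node) error {
	for _, f := range k.setNodeStatusFuncs {
		if err := f(n); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	k := &kubelet{}
	setNodeStatus(withoutError(func(n *node) { n.labels["example"] = "true" }))(k)
	n := &node{labels: map[string]string{}}
	if err := k.runStatusFuncs(n); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(n.labels)
}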

View File

@ -27,11 +27,11 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
@ -49,7 +49,7 @@ const (
)
// generateTestingImageList generates a random image list and the corresponding expectedImageList.
func generateTestingImageList(count int) ([]kubecontainer.Image, []api.ContainerImage) {
func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
// imageList is a randomly generated image list
var imageList []kubecontainer.Image
for ; count > 0; count-- {
@ -64,10 +64,10 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []api.Container
// expectedImageList is derived from imageList according to size and maxImagesInNodeStatus
// 1. sort the imageList by size
sort.Sort(sliceutils.ByImageSize(imageList))
// 2. convert sorted imageList to api.ContainerImage list
var expectedImageList []api.ContainerImage
// 2. convert sorted imageList to v1.ContainerImage list
var expectedImageList []v1.ContainerImage
for _, kubeImage := range imageList {
apiImage := api.ContainerImage{
apiImage := v1.ContainerImage{
Names: kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus],
SizeBytes: kubeImage.Size,
}
@ -96,8 +96,8 @@ func TestUpdateNewNodeStatus(t *testing.T) {
t, inputImageList, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
@ -120,45 +120,45 @@ func TestUpdateNewNodeStatus(t *testing.T) {
t.Fatalf("can't update disk space manager: %v", err)
}
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
expectedNode := &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeDiskPressure,
Status: api.ConditionFalse,
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: fmt.Sprintf("kubelet has no disk pressure"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
},
NodeInfo: api.NodeSystemInfo{
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
@ -170,22 +170,22 @@ func TestUpdateNewNodeStatus(t *testing.T) {
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
{Type: api.NodeHostName, Address: testKubeletHostname},
Addresses: []v1.NodeAddress{
{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
Images: expectedImageList,
},
@ -202,7 +202,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
if !ok {
t.Errorf("unexpected object type")
}
@ -218,14 +218,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if maxImagesInNodeStatus != len(updatedNode.Status.Images) {
t.Errorf("unexpected image list length in node status, expected: %v, got: %v", maxImagesInNodeStatus, len(updatedNode.Status.Images))
} else {
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
if !v1.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
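The status tests in this file repeat the same extraction dance: take the second recorded action on the fake clientset, check it is an update of nodes/status, and type-assert the object back to *v1.Node. A small helper along these lines could fold that into one call; this is only a sketch, the assertNodeStatusUpdate name is invented, and it assumes the fake and core testing packages this test file already imports.

// Illustrative sketch — not part of this commit.
package kubelet

import (
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
)

// assertNodeStatusUpdate expects exactly two recorded actions and returns the
// Node sent in the nodes/status update, failing the test otherwise.
func assertNodeStatusUpdate(t *testing.T, kubeClient *fake.Clientset) *v1.Node {
	actions := kubeClient.Actions()
	if len(actions) != 2 {
		t.Fatalf("unexpected actions: %v", actions)
	}
	if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
		t.Fatalf("expected a nodes/status update, got: %v", actions[1])
	}
	updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
	if !ok {
		t.Fatalf("unexpected object type: %T", actions[1].(core.UpdateAction).GetObject())
	}
	return updatedNode
}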
@ -236,8 +236,8 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
@ -262,9 +262,9 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
kubelet.outOfDiskTransitionFrequency = 10 * time.Second
expectedNodeOutOfDiskCondition := api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
expectedNodeOutOfDiskCondition := v1.NodeCondition{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.Time{},
@ -282,12 +282,12 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
if !ok {
t.Errorf("unexpected object type")
}
var oodCondition api.NodeCondition
var oodCondition v1.NodeCondition
for i, cond := range updatedNode.Status.Conditions {
if cond.LastHeartbeatTime.IsZero() {
t.Errorf("unexpected zero last probe timestamp for %v condition", cond.Type)
@ -297,7 +297,7 @@ func TestUpdateNewNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T) {
}
updatedNode.Status.Conditions[i].LastHeartbeatTime = unversioned.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = unversioned.Time{}
if cond.Type == api.NodeOutOfDisk {
if cond.Type == v1.NodeOutOfDisk {
oodCondition = updatedNode.Status.Conditions[i]
}
}
@ -311,54 +311,54 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeDiskPressure,
Status: api.ConditionFalse,
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(3000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
},
},
@ -384,45 +384,45 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
t.Fatalf("can't update disk space manager: %v", err)
}
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
expectedNode := &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.Time{}, // placeholder
LastTransitionTime: unversioned.Time{}, // placeholder
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeDiskPressure,
Status: api.ConditionFalse,
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.Time{}, // placeholder
LastTransitionTime: unversioned.Time{}, // placeholder
},
},
NodeInfo: api.NodeSystemInfo{
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
@ -434,25 +434,25 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(19900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
{Type: api.NodeHostName, Address: testKubeletHostname},
Addresses: []v1.NodeAddress{
{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
// images will be sorted from max to min in node status.
Images: []api.ContainerImage{
Images: []v1.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
@ -477,7 +477,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
updatedNode, ok := updateAction.GetObject().(*api.Node)
updatedNode, ok := updateAction.GetObject().(*v1.Node)
if !ok {
t.Errorf("unexpected object type")
}
@ -494,11 +494,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
}
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != api.NodeReady {
if updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type != v1.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
if !v1.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
@ -508,23 +508,23 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
kubelet := testKubelet.kubelet
clock := testKubelet.fakeClock
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionTrue,
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: unversioned.NewTime(clock.Now()),
LastTransitionTime: unversioned.NewTime(clock.Now()),
},
{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.NewTime(clock.Now()),
@ -558,17 +558,17 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
kubelet.outOfDiskTransitionFrequency = 5 * time.Second
ood := api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionTrue,
ood := v1.NodeCondition{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
Reason: "KubeletOutOfDisk",
Message: "out of disk space",
LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder
LastTransitionTime: unversioned.NewTime(clock.Now()), // placeholder
}
noOod := api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
noOod := v1.NodeCondition{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: unversioned.NewTime(clock.Now()), // placeholder
@ -578,7 +578,7 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
testCases := []struct {
rootFsAvail uint64
dockerFsAvail uint64
expected api.NodeCondition
expected v1.NodeCondition
}{
{
// NodeOutOfDisk==false
@ -640,15 +640,15 @@ func TestUpdateExistingNodeOutOfDiskStatusWithTransitionFrequency(t *testing.T)
if !ok {
t.Errorf("%d. unexpected action type. expected UpdateAction, got %#v", tcIdx, actions[1])
}
updatedNode, ok := updateAction.GetObject().(*api.Node)
updatedNode, ok := updateAction.GetObject().(*v1.Node)
if !ok {
t.Errorf("%d. unexpected object type", tcIdx)
}
kubeClient.ClearActions()
var oodCondition api.NodeCondition
var oodCondition v1.NodeCondition
for i, cond := range updatedNode.Status.Conditions {
if cond.Type == api.NodeOutOfDisk {
if cond.Type == v1.NodeOutOfDisk {
oodCondition = updatedNode.Status.Conditions[i]
}
}
@ -665,8 +665,8 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
kubelet := testKubelet.kubelet
clock := testKubelet.fakeClock
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname}},
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{
{ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname}},
}}).ReactionChain
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
@ -689,30 +689,30 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
t.Fatalf("can't update disk space manager: %v", err)
}
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
expectedNode := &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: api.NodeOutOfDisk,
Status: api.ConditionFalse,
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: "kubelet has sufficient disk space available",
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeMemoryPressure,
Status: api.ConditionFalse,
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeDiskPressure,
Status: api.ConditionFalse,
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: fmt.Sprintf("kubelet has no disk pressure"),
LastHeartbeatTime: unversioned.Time{},
@ -720,7 +720,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
{}, //placeholder
},
NodeInfo: api.NodeSystemInfo{
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
@ -732,24 +732,24 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Allocatable: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
api.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceNvidiaGPU: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: api.NodeInternalIP, Address: "127.0.0.1"},
{Type: api.NodeHostName, Address: testKubeletHostname},
Addresses: []v1.NodeAddress{
{Type: v1.NodeLegacyHostIP, Address: "127.0.0.1"},
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
Images: []api.ContainerImage{
Images: []v1.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
SizeBytes: 456,
@ -762,7 +762,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
}
checkNodeStatus := func(status api.ConditionStatus, reason string) {
checkNodeStatus := func(status v1.ConditionStatus, reason string) {
kubeClient.ClearActions()
if err := kubelet.updateNodeStatus(); err != nil {
t.Errorf("unexpected error: %v", err)
@ -774,7 +774,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
if !actions[1].Matches("update", "nodes") || actions[1].GetSubresource() != "status" {
t.Fatalf("unexpected actions: %v", actions)
}
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*api.Node)
updatedNode, ok := actions[1].(core.UpdateAction).GetObject().(*v1.Node)
if !ok {
t.Errorf("unexpected action type. expected UpdateAction, got %#v", actions[1])
}
@ -792,21 +792,21 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
lastIndex := len(updatedNode.Status.Conditions) - 1
if updatedNode.Status.Conditions[lastIndex].Type != api.NodeReady {
if updatedNode.Status.Conditions[lastIndex].Type != v1.NodeReady {
t.Errorf("unexpected node condition order. NodeReady should be last.")
}
if updatedNode.Status.Conditions[lastIndex].Message == "" {
t.Errorf("unexpected empty condition message")
}
updatedNode.Status.Conditions[lastIndex].Message = ""
expectedNode.Status.Conditions[lastIndex] = api.NodeCondition{
Type: api.NodeReady,
expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
Type: v1.NodeReady,
Status: status,
Reason: reason,
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
}
if !api.Semantic.DeepEqual(expectedNode, updatedNode) {
if !v1.Semantic.DeepEqual(expectedNode, updatedNode) {
t.Errorf("unexpected objects: %s", diff.ObjectDiff(expectedNode, updatedNode))
}
}
@ -815,17 +815,17 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
// Should report kubelet not ready if the runtime check is out of date
clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report kubelet ready if the runtime check is updated
clock.SetTime(time.Now())
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionTrue, "KubeletReady")
checkNodeStatus(v1.ConditionTrue, "KubeletReady")
// Should report kubelet not ready if the runtime check is out of date
clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report kubelet not ready if the runtime check failed
fakeRuntime := testKubelet.fakeRuntime
@ -833,7 +833,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
clock.SetTime(time.Now())
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Test cri integration.
kubelet.kubeletConfiguration.EnableCRI = true
@ -842,12 +842,12 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
// Should report node not ready if runtime status is nil.
fakeRuntime.RuntimeStatus = nil
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report node not ready if runtime status is empty.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report node not ready if RuntimeReady is false.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
@ -857,7 +857,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
}
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
// Should report node ready if RuntimeReady is true.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
@ -867,7 +867,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
}
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionTrue, "KubeletReady")
checkNodeStatus(v1.ConditionTrue, "KubeletReady")
// Should report node not ready if NetworkReady is false.
fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
@ -877,14 +877,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
},
}
kubelet.updateRuntimeUp()
checkNodeStatus(api.ConditionFalse, "KubeletNotReady")
checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
}
func TestUpdateNodeStatusError(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
// No matching node for the kubelet
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{}}).ReactionChain
testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
if err := kubelet.updateNodeStatus(); err == nil {
t.Errorf("unexpected non error: %v", err)
@ -900,15 +900,15 @@ func TestRegisterWithApiServer(t *testing.T) {
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an error on create.
return true, &api.Node{}, &apierrors.StatusError{
return true, &v1.Node{}, &apierrors.StatusError{
ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonAlreadyExists},
}
})
kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
// Return an existing (matching) node on get.
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
return true, &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
@ -961,10 +961,10 @@ func TestTryRegisterWithApiServer(t *testing.T) {
ErrStatus: unversioned.Status{Reason: unversioned.StatusReasonConflict},
}
newNode := func(cmad bool, externalID string) *api.Node {
node := &api.Node{
ObjectMeta: api.ObjectMeta{},
Spec: api.NodeSpec{
newNode := func(cmad bool, externalID string) *v1.Node {
node := &v1.Node{
ObjectMeta: v1.ObjectMeta{},
Spec: v1.NodeSpec{
ExternalID: externalID,
},
}
@ -979,8 +979,8 @@ func TestTryRegisterWithApiServer(t *testing.T) {
cases := []struct {
name string
newNode *api.Node
existingNode *api.Node
newNode *v1.Node
existingNode *v1.Node
createError error
getError error
updateError error
@ -993,7 +993,7 @@ func TestTryRegisterWithApiServer(t *testing.T) {
}{
{
name: "success case - new node",
newNode: &api.Node{},
newNode: &v1.Node{},
expectedResult: true,
expectedActions: 1,
},
@ -1111,23 +1111,23 @@ func TestTryRegisterWithApiServer(t *testing.T) {
}
if tc.testSavedNode {
var savedNode *api.Node
var savedNode *v1.Node
var ok bool
t.Logf("actions: %v: %+v", len(actions), actions)
action := actions[tc.savedNodeIndex]
if action.GetVerb() == "create" {
createAction := action.(core.CreateAction)
savedNode, ok = createAction.GetObject().(*api.Node)
savedNode, ok = createAction.GetObject().(*v1.Node)
if !ok {
t.Errorf("%v: unexpected type; couldn't convert to *api.Node: %+v", tc.name, createAction.GetObject())
t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, createAction.GetObject())
continue
}
} else if action.GetVerb() == "update" {
updateAction := action.(core.UpdateAction)
savedNode, ok = updateAction.GetObject().(*api.Node)
savedNode, ok = updateAction.GetObject().(*v1.Node)
if !ok {
t.Errorf("%v: unexpected type; couldn't convert to *api.Node: %+v", tc.name, updateAction.GetObject())
t.Errorf("%v: unexpected type; couldn't convert to *v1.Node: %+v", tc.name, updateAction.GetObject())
continue
}
}

View File

@ -33,9 +33,10 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
utilpod "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/api/v1"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/api/v1/validation"
"k8s.io/kubernetes/pkg/fieldpath"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -72,7 +73,7 @@ func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
}
// getActivePods returns non-terminal pods
func (kl *Kubelet) getActivePods() []*api.Pod {
func (kl *Kubelet) getActivePods() []*v1.Pod {
allPods := kl.podManager.GetPods()
activePods := kl.filterOutTerminatedPods(allPods)
return activePods
@ -82,7 +83,7 @@ func (kl *Kubelet) getActivePods() []*api.Pod {
// Experimental. For now, we hardcode /dev/nvidia0 no matter what the user asks for
// (we only support one device per node).
// TODO: add support for more than 1 GPU after #28216.
func makeDevices(container *api.Container) []kubecontainer.DeviceInfo {
func makeDevices(container *v1.Container) []kubecontainer.DeviceInfo {
nvidiaGPULimit := container.Resources.Limits.NvidiaGPU()
if nvidiaGPULimit.Value() != 0 {
return []kubecontainer.DeviceInfo{
@ -96,14 +97,14 @@ func makeDevices(container *api.Container) []kubecontainer.DeviceInfo {
}
// makeMounts determines the mount points for the given container.
func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, hostDomain, podIP string, podVolumes kubecontainer.VolumeMap) ([]kubecontainer.Mount, error) {
// Kubernetes only mounts on /etc/hosts if :
// - container does not use hostNetwork and
// - container is not an infrastructure(pause) container
// - container is not already mounting on /etc/hosts
// When the pause container is being created, its IP is still unknown. Hence, PodIP will not have been set.
// OS is not Windows
mountEtcHostsFile := (pod.Spec.SecurityContext == nil || !pod.Spec.SecurityContext.HostNetwork) && len(podIP) > 0 && runtime.GOOS != "windows"
mountEtcHostsFile := !pod.Spec.HostNetwork && len(podIP) > 0 && runtime.GOOS != "windows"
glog.V(3).Infof("container: %v/%v/%v podIP: %q creating hosts mount: %v", pod.Namespace, pod.Name, container.Name, podIP, mountEtcHostsFile)
mounts := []kubecontainer.Mount{}
for _, mount := range container.VolumeMounts {
@ -198,7 +199,7 @@ func ensureHostsFile(fileName, hostIP, hostName, hostDomainName string) error {
return ioutil.WriteFile(fileName, buffer.Bytes(), 0644)
}
func makePortMappings(container *api.Container) (ports []kubecontainer.PortMapping) {
func makePortMappings(container *v1.Container) (ports []kubecontainer.PortMapping) {
names := make(map[string]struct{})
for _, p := range container.Ports {
pm := kubecontainer.PortMapping{
@ -248,7 +249,7 @@ func truncatePodHostnameIfNeeded(podName, hostname string) (string, error) {
// GeneratePodHostNameAndDomain creates a hostname and domain name for a pod,
// given that pod's spec and annotations or returns an error.
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) {
func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
// TODO(vmarmol): Handle better.
clusterDomain := kl.clusterDomain
podAnnotations := pod.Annotations
@ -290,7 +291,7 @@ func (kl *Kubelet) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, e
// GenerateRunContainerOptions generates the RunContainerOptions, which can be used by
// the container runtime to set parameters for launching a container.
func (kl *Kubelet) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
func (kl *Kubelet) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var err error
pcm := kl.containerManager.NewPodContainerManager()
_, podContainerName := pcm.GetPodContainerName(pod)
@ -345,7 +346,7 @@ var masterServices = sets.NewString("kubernetes")
// pod in namespace ns should see.
func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
var (
serviceMap = make(map[string]*api.Service)
serviceMap = make(map[string]*v1.Service)
m = make(map[string]string)
)
@ -364,7 +365,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
for i := range services {
service := services[i]
// ignore services where ClusterIP is "None" or empty
if !api.IsServiceIPSet(service) {
if !v1.IsServiceIPSet(service) {
continue
}
serviceName := service.Name
@ -385,7 +386,7 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
}
}
mappedServices := []*api.Service{}
mappedServices := []*v1.Service{}
for key := range serviceMap {
mappedServices = append(mappedServices, serviceMap[key])
}
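The filter above drops headless services before the env var map is built. A minimal sketch of deriving the *_SERVICE_HOST entries, assuming v1.IsServiceIPSet as used in this hunk; the real kubelet builds the full set elsewhere and is not shown in this diff.

package example

import (
	"strings"

	"k8s.io/kubernetes/pkg/api/v1"
)

// serviceHostEnvVars maps each ClusterIP-backed service to a FOO_SERVICE_HOST
// style entry; headless services are skipped, mirroring the loop above.
func serviceHostEnvVars(services []*v1.Service) map[string]string {
	env := map[string]string{}
	for _, svc := range services {
		if !v1.IsServiceIPSet(svc) {
			continue
		}
		name := strings.ToUpper(strings.Replace(svc.Name, "-", "_", -1))
		env[name+"_SERVICE_HOST"] = svc.Spec.ClusterIP
	}
	return env
}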
@ -397,11 +398,11 @@ func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
}
// Make the environment variables for a pod in the given namespace.
func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Container, podIP string) ([]kubecontainer.EnvVar, error) {
func (kl *Kubelet) makeEnvironmentVariables(pod *v1.Pod, container *v1.Container, podIP string) ([]kubecontainer.EnvVar, error) {
var result []kubecontainer.EnvVar
// Note: These are added to the docker Config, but are not included in the checksum computed
// by dockertools.BuildDockerName(...). That way, we can still determine whether an
// api.Container is already running by its hash. (We don't want to restart a container just
// v1.Container is already running by its hash. (We don't want to restart a container just
// because some service changed.)
//
// Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
@ -424,8 +425,8 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain
// 3. Add remaining service environment vars
var (
tmpEnv = make(map[string]string)
configMaps = make(map[string]*api.ConfigMap)
secrets = make(map[string]*api.Secret)
configMaps = make(map[string]*v1.ConfigMap)
secrets = make(map[string]*v1.Secret)
mappingFunc = expansion.MappingFuncFor(tmpEnv, serviceEnv)
)
for _, envVar := range container.Env {
@ -510,7 +511,7 @@ func (kl *Kubelet) makeEnvironmentVariables(pod *api.Pod, container *api.Contain
// podFieldSelectorRuntimeValue returns the runtime value of the given
// selector for a pod.
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod *api.Pod, podIP string) (string, error) {
func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *v1.ObjectFieldSelector, pod *v1.Pod, podIP string) (string, error) {
internalFieldPath, _, err := api.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "")
if err != nil {
return "", err
@ -527,7 +528,7 @@ func (kl *Kubelet) podFieldSelectorRuntimeValue(fs *api.ObjectFieldSelector, pod
}
// containerResourceRuntimeValue returns the value of the provided container resource
func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod, container *api.Container) (string, error) {
func containerResourceRuntimeValue(fs *v1.ResourceFieldSelector, pod *v1.Pod, container *v1.Container) (string, error) {
containerName := fs.ContainerName
if len(containerName) == 0 {
return fieldpath.ExtractContainerResourceValue(fs, container)
@ -538,7 +539,7 @@ func containerResourceRuntimeValue(fs *api.ResourceFieldSelector, pod *api.Pod,
// One of the following arguments must be non-nil: runningPod, status.
// TODO: Modify containerRuntime.KillPod() to accept the right arguments.
func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
func (kl *Kubelet) killPod(pod *v1.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus, gracePeriodOverride *int64) error {
var p kubecontainer.Pod
if runningPod != nil {
p = *runningPod
@ -577,7 +578,7 @@ func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *
}
// makePodDataDirs creates the dirs for the pod data.
func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
func (kl *Kubelet) makePodDataDirs(pod *v1.Pod) error {
uid := pod.UID
if err := os.MkdirAll(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
@ -592,8 +593,8 @@ func (kl *Kubelet) makePodDataDirs(pod *api.Pod) error {
}
// returns whether the pod uses the host network namespace.
func podUsesHostNetwork(pod *api.Pod) bool {
return pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.HostNetwork
func podUsesHostNetwork(pod *v1.Pod) bool {
return pod.Spec.HostNetwork
}
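In the versioned API the host-namespace flags live directly on PodSpec, so the nil check on SecurityContext that the internal types required goes away. A minimal sketch of the same idea, using only the pod.Spec fields that appear elsewhere in this diff; the helper name is illustrative.

package example

import "k8s.io/kubernetes/pkg/api/v1"

// usesAnyHostNamespace reports whether the pod shares any host namespace.
func usesAnyHostNamespace(pod *v1.Pod) bool {
	return pod.Spec.HostNetwork || pod.Spec.HostPID || pod.Spec.HostIPC
}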
// getPullSecretsForPod inspects the Pod and retrieves the referenced pull
@ -601,8 +602,8 @@ func podUsesHostNetwork(pod *api.Pod) bool {
// TODO: duplicate secrets are being retrieved multiple times and there
// is no cache. Creating and using a secret manager interface will make this
// easier to address.
func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) {
pullSecrets := []api.Secret{}
func (kl *Kubelet) getPullSecretsForPod(pod *v1.Pod) ([]v1.Secret, error) {
pullSecrets := []v1.Secret{}
for _, secretRef := range pod.Spec.ImagePullSecrets {
secret, err := kl.kubeClient.Core().Secrets(pod.Namespace).Get(secretRef.Name)
@ -618,8 +619,8 @@ func (kl *Kubelet) getPullSecretsForPod(pod *api.Pod) ([]api.Secret, error) {
}
// Returns true if pod is in the terminated state ("Failed" or "Succeeded").
func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool {
var status api.PodStatus
func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool {
var status v1.PodStatus
// Check the cached pod status which was set after the last sync.
status, ok := kl.statusManager.GetPodStatus(pod.UID)
if !ok {
@ -628,7 +629,7 @@ func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool {
// restarted.
status = pod.Status
}
if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
if status.Phase == v1.PodFailed || status.Phase == v1.PodSucceeded {
return true
}
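The terminal check reduces to a comparison against the two terminal phases. A small sketch using the v1 constants referenced above; the helper name is hypothetical.

package example

import "k8s.io/kubernetes/pkg/api/v1"

// isTerminalPhase mirrors the check above: a pod is terminal once its
// phase is Failed or Succeeded.
func isTerminalPhase(phase v1.PodPhase) bool {
	return phase == v1.PodFailed || phase == v1.PodSucceeded
}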
@ -637,8 +638,8 @@ func (kl *Kubelet) podIsTerminated(pod *api.Pod) bool {
// filterOutTerminatedPods returns the given pods which the status manager
// does not consider failed or succeeded.
func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod {
var filteredPods []*api.Pod
func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
var filteredPods []*v1.Pod
for _, p := range pods {
if kl.podIsTerminated(p) {
continue
@ -650,7 +651,7 @@ func (kl *Kubelet) filterOutTerminatedPods(pods []*api.Pod) []*api.Pod {
// removeOrphanedPodStatuses removes obsolete entries in podStatus where
// the pod is no longer considered bound to this node.
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*api.Pod, mirrorPods []*api.Pod) {
func (kl *Kubelet) removeOrphanedPodStatuses(pods []*v1.Pod, mirrorPods []*v1.Pod) {
podUIDs := make(map[types.UID]bool)
for _, pod := range pods {
podUIDs[pod.UID] = true
@ -778,7 +779,7 @@ func (kl *Kubelet) podKiller() {
break
}
killing.Insert(string(runningPod.ID))
go func(apiPod *api.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
go func(apiPod *v1.Pod, runningPod *kubecontainer.Pod, ch chan types.UID) {
defer func() {
ch <- runningPod.ID
}()
@ -796,7 +797,7 @@ func (kl *Kubelet) podKiller() {
}
// checkHostPortConflicts detects pods with conflicted host ports.
func hasHostPortConflicts(pods []*api.Pod) bool {
func hasHostPortConflicts(pods []*v1.Pod) bool {
ports := sets.String{}
for _, pod := range pods {
if errs := validation.AccumulateUniqueHostPorts(pod.Spec.Containers, &ports, field.NewPath("spec", "containers")); len(errs) > 0 {
@ -815,13 +816,13 @@ func hasHostPortConflicts(pods []*api.Pod) bool {
// of the container. The previous flag will only return the logs for the last terminated container, otherwise, the current
// running container is preferred over a previous termination. If info about the container is not available then a specific
// error is returned to the end user.
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *v1.PodStatus, containerName string, previous bool) (containerID kubecontainer.ContainerID, err error) {
var cID string
cStatus, found := api.GetContainerStatus(podStatus.ContainerStatuses, containerName)
cStatus, found := v1.GetContainerStatus(podStatus.ContainerStatuses, containerName)
// if not found, check the init containers
if !found {
cStatus, found = api.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
cStatus, found = v1.GetContainerStatus(podStatus.InitContainerStatuses, containerName)
}
if !found {
return kubecontainer.ContainerID{}, fmt.Errorf("container %q in pod %q is not available", containerName, podName)
@ -866,7 +867,7 @@ func (kl *Kubelet) validateContainerLogStatus(podName string, podStatus *api.Pod
// GetKubeletContainerLogs returns logs from the container
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *api.PodLogOptions, stdout, stderr io.Writer) error {
func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
// Pod workers periodically write status to statusManager. If status is not
// cached there, something is wrong (or kubelet just restarted and hasn't
// caught up yet). Just assume the pod is not ready yet.
@ -914,12 +915,12 @@ func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName string, lo
// GetPhase returns the phase of a pod given its container info.
// This func is exported to simplify integration with 3rd party kubelet
// integrations like kubernetes-mesos.
func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
func GetPhase(spec *v1.PodSpec, info []v1.ContainerStatus) v1.PodPhase {
initialized := 0
pendingInitialization := 0
failedInitialization := 0
for _, container := range spec.InitContainers {
containerStatus, ok := api.GetContainerStatus(info, container.Name)
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
pendingInitialization++
continue
@ -956,7 +957,7 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
failed := 0
succeeded := 0
for _, container := range spec.Containers {
containerStatus, ok := api.GetContainerStatus(info, container.Name)
containerStatus, ok := v1.GetContainerStatus(info, container.Name)
if !ok {
unknown++
continue
@ -983,8 +984,8 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
}
}
if failedInitialization > 0 && spec.RestartPolicy == api.RestartPolicyNever {
return api.PodFailed
if failedInitialization > 0 && spec.RestartPolicy == v1.RestartPolicyNever {
return v1.PodFailed
}
switch {
@ -993,46 +994,46 @@ func GetPhase(spec *api.PodSpec, info []api.ContainerStatus) api.PodPhase {
case waiting > 0:
glog.V(5).Infof("pod waiting > 0, pending")
// One or more containers has not been started
return api.PodPending
return v1.PodPending
case running > 0 && unknown == 0:
// All containers have been started, and at least
// one container is running
return api.PodRunning
return v1.PodRunning
case running == 0 && stopped > 0 && unknown == 0:
// All containers are terminated
if spec.RestartPolicy == api.RestartPolicyAlways {
if spec.RestartPolicy == v1.RestartPolicyAlways {
// All containers are in the process of restarting
return api.PodRunning
return v1.PodRunning
}
if stopped == succeeded {
// RestartPolicy is not Always, and all
// containers are terminated in success
return api.PodSucceeded
return v1.PodSucceeded
}
if spec.RestartPolicy == api.RestartPolicyNever {
if spec.RestartPolicy == v1.RestartPolicyNever {
// RestartPolicy is Never, and all containers are
// terminated with at least one in failure
return api.PodFailed
return v1.PodFailed
}
// RestartPolicy is OnFailure, and at least one in failure
// and in the process of restarting
return api.PodRunning
return v1.PodRunning
default:
glog.V(5).Infof("pod default case, pending")
return api.PodPending
return v1.PodPending
}
}
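A hedged usage sketch of GetPhase as exported above, with import paths taken from this diff; the expected result assumes a single running container and no init containers.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/kubelet"
)

func main() {
	spec := &v1.PodSpec{
		RestartPolicy: v1.RestartPolicyAlways,
		Containers:    []v1.Container{{Name: "app"}},
	}
	info := []v1.ContainerStatus{{
		Name:  "app",
		State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
	}}
	// Every container is started and running, so the computed phase is Running.
	fmt.Println(kubelet.GetPhase(spec, info))
}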
// generateAPIPodStatus creates the final API pod status for a pod, given the
// internal pod status.
func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) api.PodStatus {
func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodStatus {
glog.V(3).Infof("Generating status for %q", format.Pod(pod))
// check if an internal module has requested the pod is evicted.
for _, podSyncHandler := range kl.PodSyncHandlers {
if result := podSyncHandler.ShouldEvict(pod); result.Evict {
return api.PodStatus{
Phase: api.PodFailed,
return v1.PodStatus{
Phase: v1.PodFailed,
Reason: result.Reason,
Message: result.Message,
}
@ -1043,7 +1044,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
// Assume info is ready to process
spec := &pod.Spec
allStatus := append(append([]api.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
s.Phase = GetPhase(spec, allStatus)
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
@ -1051,12 +1052,12 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
// s (the PodStatus we are creating) will not have a PodScheduled condition yet, because convertStatusToAPIStatus()
// does not create one. If the existing PodStatus has a PodScheduled condition, then copy it into s and make sure
// it is set to true. If the existing PodStatus does not have a PodScheduled condition, then create one that is set to true.
if _, oldPodScheduled := api.GetPodCondition(&pod.Status, api.PodScheduled); oldPodScheduled != nil {
if _, oldPodScheduled := v1.GetPodCondition(&pod.Status, v1.PodScheduled); oldPodScheduled != nil {
s.Conditions = append(s.Conditions, *oldPodScheduled)
}
api.UpdatePodCondition(&pod.Status, &api.PodCondition{
Type: api.PodScheduled,
Status: api.ConditionTrue,
v1.UpdatePodCondition(&pod.Status, &v1.PodCondition{
Type: v1.PodScheduled,
Status: v1.ConditionTrue,
})
if !kl.standaloneMode {
@ -1077,8 +1078,8 @@ func (kl *Kubelet) generateAPIPodStatus(pod *api.Pod, podStatus *kubecontainer.P
// convertStatusToAPIStatus creates an api PodStatus for the given pod from
// the given internal pod status. It is purely transformative and does not
// alter the kubelet state at all.
func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontainer.PodStatus) *api.PodStatus {
var apiPodStatus api.PodStatus
func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *v1.PodStatus {
var apiPodStatus v1.PodStatus
apiPodStatus.PodIP = podStatus.IP
apiPodStatus.ContainerStatuses = kl.convertToAPIContainerStatuses(
@ -1101,10 +1102,10 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *api.Pod, podStatus *kubecontain
// convertToAPIContainerStatuses converts the given internal container
// statuses into API container statuses.
func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubecontainer.PodStatus, previousStatus []api.ContainerStatus, containers []api.Container, hasInitContainers, isInitContainer bool) []api.ContainerStatus {
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *api.ContainerStatus {
func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecontainer.PodStatus, previousStatus []v1.ContainerStatus, containers []v1.Container, hasInitContainers, isInitContainer bool) []v1.ContainerStatus {
convertContainerStatus := func(cs *kubecontainer.ContainerStatus) *v1.ContainerStatus {
cid := cs.ID.String()
status := &api.ContainerStatus{
status := &v1.ContainerStatus{
Name: cs.Name,
RestartCount: int32(cs.RestartCount),
Image: cs.Image,
@ -1113,9 +1114,9 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco
}
switch cs.State {
case kubecontainer.ContainerStateRunning:
status.State.Running = &api.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)}
status.State.Running = &v1.ContainerStateRunning{StartedAt: unversioned.NewTime(cs.StartedAt)}
case kubecontainer.ContainerStateExited:
status.State.Terminated = &api.ContainerStateTerminated{
status.State.Terminated = &v1.ContainerStateTerminated{
ExitCode: int32(cs.ExitCode),
Reason: cs.Reason,
Message: cs.Message,
@ -1124,26 +1125,26 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco
ContainerID: cid,
}
default:
status.State.Waiting = &api.ContainerStateWaiting{}
status.State.Waiting = &v1.ContainerStateWaiting{}
}
return status
}
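For reference, this is the shape the closure above produces for an exited container. A sketch using only the fields visible in this hunk; the constructor is hypothetical and the timestamps are omitted.

package example

import "k8s.io/kubernetes/pkg/api/v1"

// exitedStatus shows the terminated shape built for ContainerStateExited.
func exitedStatus(name, containerID string, exitCode int32, reason, message string) *v1.ContainerStatus {
	return &v1.ContainerStatus{
		Name:        name,
		ContainerID: containerID,
		State: v1.ContainerState{
			Terminated: &v1.ContainerStateTerminated{
				ExitCode:    exitCode,
				Reason:      reason,
				Message:     message,
				ContainerID: containerID,
			},
		},
	}
}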
// Fetch old containers statuses from old pod status.
oldStatuses := make(map[string]api.ContainerStatus, len(containers))
oldStatuses := make(map[string]v1.ContainerStatus, len(containers))
for _, status := range previousStatus {
oldStatuses[status.Name] = status
}
// Set all container statuses to default waiting state
statuses := make(map[string]*api.ContainerStatus, len(containers))
defaultWaitingState := api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "ContainerCreating"}}
statuses := make(map[string]*v1.ContainerStatus, len(containers))
defaultWaitingState := v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"}}
if hasInitContainers {
defaultWaitingState = api.ContainerState{Waiting: &api.ContainerStateWaiting{Reason: "PodInitializing"}}
defaultWaitingState = v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "PodInitializing"}}
}
for _, container := range containers {
status := &api.ContainerStatus{
status := &v1.ContainerStatus{
Name: container.Name,
Image: container.Image,
State: defaultWaitingState,
@ -1206,8 +1207,8 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco
if status.State.Terminated != nil {
status.LastTerminationState = status.State
}
status.State = api.ContainerState{
Waiting: &api.ContainerStateWaiting{
status.State = v1.ContainerState{
Waiting: &v1.ContainerStateWaiting{
Reason: reason.Error(),
Message: message,
},
@ -1215,7 +1216,7 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *api.Pod, podStatus *kubeco
statuses[container.Name] = status
}
var containerStatuses []api.ContainerStatus
var containerStatuses []v1.ContainerStatus
for _, status := range statuses {
containerStatuses = append(containerStatuses, *status)
}
@ -1386,7 +1387,7 @@ func (kl *Kubelet) GetPortForward(podName, podNamespace string, podUID types.UID
// running and whose volumes have been cleaned up.
func (kl *Kubelet) cleanupOrphanedPodCgroups(
cgroupPods map[types.UID]cm.CgroupName,
pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
// Add all running and existing terminated pods to a set allPods
allPods := sets.NewString()
for _, pod := range pods {
@ -1426,7 +1427,7 @@ func (kl *Kubelet) cleanupOrphanedPodCgroups(
// NOTE: when if a container shares any namespace with another container it must also share the user namespace
// or it will not have the correct capabilities in the namespace. This means that host user namespace
// is enabled per pod, not per container.
func (kl *Kubelet) enableHostUserNamespace(pod *api.Pod) bool {
func (kl *Kubelet) enableHostUserNamespace(pod *v1.Pod) bool {
if hasPrivilegedContainer(pod) || hasHostNamespace(pod) ||
hasHostVolume(pod) || hasNonNamespacedCapability(pod) || kl.hasHostMountPVC(pod) {
return true
@ -1435,7 +1436,7 @@ func (kl *Kubelet) enableHostUserNamespace(pod *api.Pod) bool {
}
// hasPrivilegedContainer returns true if any of the containers in the pod are privileged.
func hasPrivilegedContainer(pod *api.Pod) bool {
func hasPrivilegedContainer(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil &&
c.SecurityContext.Privileged != nil &&
@ -1447,7 +1448,7 @@ func hasPrivilegedContainer(pod *api.Pod) bool {
}
// hasNonNamespacedCapability returns true if MKNOD, SYS_TIME, or SYS_MODULE is requested for any container.
func hasNonNamespacedCapability(pod *api.Pod) bool {
func hasNonNamespacedCapability(pod *v1.Pod) bool {
for _, c := range pod.Spec.Containers {
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
for _, cap := range c.SecurityContext.Capabilities.Add {
@ -1462,7 +1463,7 @@ func hasNonNamespacedCapability(pod *api.Pod) bool {
}
// hasHostVolume returns true if the pod spec has a HostPath volume.
func hasHostVolume(pod *api.Pod) bool {
func hasHostVolume(pod *v1.Pod) bool {
for _, v := range pod.Spec.Volumes {
if v.HostPath != nil {
return true
@ -1472,15 +1473,15 @@ func hasHostVolume(pod *api.Pod) bool {
}
// hasHostNamespace returns true if hostIPC, hostNetwork, or hostPID are set to true.
func hasHostNamespace(pod *api.Pod) bool {
func hasHostNamespace(pod *v1.Pod) bool {
if pod.Spec.SecurityContext == nil {
return false
}
return pod.Spec.SecurityContext.HostIPC || pod.Spec.SecurityContext.HostNetwork || pod.Spec.SecurityContext.HostPID
return pod.Spec.HostIPC || pod.Spec.HostNetwork || pod.Spec.HostPID
}
// hasHostMountPVC returns true if a PVC is referencing a HostPath volume.
func (kl *Kubelet) hasHostMountPVC(pod *api.Pod) bool {
func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
for _, volume := range pod.Spec.Volumes {
if volume.PersistentVolumeClaim != nil {
pvc, err := kl.kubeClient.Core().PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName)

File diff suppressed because it is too large

View File

@ -20,6 +20,7 @@ import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fieldpath"
)
@ -30,7 +31,7 @@ import (
// the node allocatable.
// TODO: if/when we have pod level resources, we need to update this function
// to use those limits instead of node allocatable.
func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.Container) (*api.Pod, *api.Container, error) {
func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
if pod == nil {
return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
}
@ -45,7 +46,7 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.C
if err != nil {
return nil, nil, fmt.Errorf("failed to perform a deep copy of pod object: %v", err)
}
outputPod, ok := podCopy.(*api.Pod)
outputPod, ok := podCopy.(*v1.Pod)
if !ok {
return nil, nil, fmt.Errorf("unexpected type returned from deep copy of pod object")
}
@ -53,13 +54,13 @@ func (kl *Kubelet) defaultPodLimitsForDownwardApi(pod *api.Pod, container *api.C
fieldpath.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
}
var outputContainer *api.Container
var outputContainer *v1.Container
if container != nil {
containerCopy, err := api.Scheme.DeepCopy(container)
if err != nil {
return nil, nil, fmt.Errorf("failed to perform a deep copy of container object: %v", err)
}
outputContainer, ok = containerCopy.(*api.Container)
outputContainer, ok = containerCopy.(*v1.Container)
if !ok {
return nil, nil, fmt.Errorf("unexpected type returned from deep copy of container object")
}
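The deep-copy-then-assert pattern above generalizes to the pod as well. A minimal sketch assuming api.Scheme from this file's imports.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
)

// deepCopyPod copies a pod via the scheme and asserts the concrete type back,
// mirroring the defaulting code above.
func deepCopyPod(pod *v1.Pod) (*v1.Pod, error) {
	copied, err := api.Scheme.DeepCopy(pod)
	if err != nil {
		return nil, fmt.Errorf("failed to perform a deep copy of pod object: %v", err)
	}
	out, ok := copied.(*v1.Pod)
	if !ok {
		return nil, fmt.Errorf("unexpected type %T returned from deep copy", copied)
	}
	return out, nil
}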

View File

@ -23,8 +23,8 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)
@ -41,19 +41,19 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
tk.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
tk.kubelet.reservation = kubetypes.Reservation{
Kubernetes: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("4Gi"),
Kubernetes: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("4Gi"),
},
System: api.ResourceList{
api.ResourceCPU: resource.MustParse("1"),
api.ResourceMemory: resource.MustParse("2Gi"),
System: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
v1.ResourceMemory: resource.MustParse("2Gi"),
},
}
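The reservation above is just a pair of v1.ResourceList values. A tiny sketch of building one with resource.MustParse, using the import path from this test file; the helper is illustrative.

package example

import (
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

// reservedResources builds a ResourceList like the Kubernetes/System
// reservations configured in the test above.
func reservedResources(cpu, memory string) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse(cpu),
		v1.ResourceMemory: resource.MustParse(memory),
	}
}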
cases := []struct {
pod *api.Pod
expected *api.Pod
pod *v1.Pod
expected *v1.Pod
}{
{
pod: getPod("0", "0"),
@ -76,26 +76,26 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
for idx, tc := range cases {
actual, _, err := tk.kubelet.defaultPodLimitsForDownwardApi(tc.pod, nil)
as.Nil(err, "failed to default pod limits: %v", err)
if !api.Semantic.DeepEqual(tc.expected, actual) {
if !v1.Semantic.DeepEqual(tc.expected, actual) {
as.Fail("test case [%d] failed. Expected: %+v, Got: %+v", idx, tc.expected, actual)
}
}
}
func getPod(cpuLimit, memoryLimit string) *api.Pod {
resources := api.ResourceRequirements{}
func getPod(cpuLimit, memoryLimit string) *v1.Pod {
resources := v1.ResourceRequirements{}
if cpuLimit != "" || memoryLimit != "" {
resources.Limits = make(api.ResourceList)
resources.Limits = make(v1.ResourceList)
}
if cpuLimit != "" {
resources.Limits[api.ResourceCPU] = resource.MustParse(cpuLimit)
resources.Limits[v1.ResourceCPU] = resource.MustParse(cpuLimit)
}
if memoryLimit != "" {
resources.Limits[api.ResourceMemory] = resource.MustParse(memoryLimit)
resources.Limits[v1.ResourceMemory] = resource.MustParse(memoryLimit)
}
return &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
return &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Resources: resources,

File diff suppressed because it is too large

View File

@ -21,7 +21,7 @@ import (
"os"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@ -65,7 +65,7 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
// newVolumeMounterFromPlugins attempts to find a plugin by volume spec, pod
// and volume options and then creates a Mounter.
// Returns a valid Mounter or an error.
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
if err != nil {
return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
@ -81,7 +81,7 @@ func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod,
// cleanupOrphanedPodDirs removes the volumes of pods that should not be
// running and that have no containers running.
func (kl *Kubelet) cleanupOrphanedPodDirs(
pods []*api.Pod, runningPods []*kubecontainer.Pod) error {
pods []*v1.Pod, runningPods []*kubecontainer.Pod) error {
allPods := sets.NewString()
for _, pod := range pods {
allPods.Insert(string(pod.UID))

View File

@ -21,7 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/volume"
@ -33,18 +33,18 @@ func TestPodVolumesExist(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pods := []*api.Pod{
pods := []*v1.Pod{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device1",
},
},
@ -53,16 +53,16 @@ func TestPodVolumesExist(t *testing.T) {
},
},
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "pod2",
UID: "pod2uid",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol2",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device2",
},
},
@ -71,16 +71,16 @@ func TestPodVolumesExist(t *testing.T) {
},
},
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "pod3",
UID: "pod3uid",
},
Spec: api.PodSpec{
Volumes: []api.Volume{
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol3",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device3",
},
},
@ -117,12 +117,12 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
@ -135,7 +135,7 @@ func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
close(stopCh)
}()
kubelet.podManager.SetPods([]*api.Pod{pod})
kubelet.podManager.SetPods([]*v1.Pod{pod})
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
assert.NoError(t, err)
@ -162,12 +162,12 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kubelet := testKubelet.kubelet
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
@ -181,7 +181,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
}()
// Add pod
kubelet.podManager.SetPods([]*api.Pod{pod})
kubelet.podManager.SetPods([]*v1.Pod{pod})
// Verify volumes attached
err := kubelet.volumeManager.WaitForAttachAndMount(pod)
@ -207,7 +207,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
// Remove pod
kubelet.podManager.SetPods([]*api.Pod{})
kubelet.podManager.SetPods([]*v1.Pod{})
assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod))
@ -222,7 +222,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))
// Verify volumes detached and no longer reported as in use
assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager))
assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/vol1"), kubelet.volumeManager))
assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
assert.NoError(t, volumetest.VerifyDetachCallCount(
1 /* expectedDetachCallCount */, testKubelet.volumePlugin))
@ -234,28 +234,28 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Status: api.NodeStatus{
VolumesAttached: []api.AttachedVolume{
return true, &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake/vol1",
DevicePath: "fake/path",
},
}},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
Spec: v1.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
@ -268,11 +268,11 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
close(stopCh)
}()
kubelet.podManager.SetPods([]*api.Pod{pod})
kubelet.podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
api.UniqueVolumeName("fake/vol1"),
v1.UniqueVolumeName("fake/vol1"),
stopCh,
kubelet.volumeManager)
@ -302,28 +302,28 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
kubeClient := testKubelet.fakeKubeClient
kubeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &api.Node{
ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
Status: api.NodeStatus{
VolumesAttached: []api.AttachedVolume{
return true, &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake/vol1",
DevicePath: "fake/path",
},
}},
Spec: api.NodeSpec{ExternalID: testKubeletHostname},
Spec: v1.NodeSpec{ExternalID: testKubeletHostname},
}, nil
})
kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, nil, fmt.Errorf("no reaction implemented for %s", action)
})
pod := podWithUidNameNsSpec("12345678", "foo", "test", api.PodSpec{
Volumes: []api.Volume{
pod := podWithUidNameNsSpec("12345678", "foo", "test", v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "vol1",
VolumeSource: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "fake-device",
},
},
@ -337,11 +337,11 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
}()
// Add pod
kubelet.podManager.SetPods([]*api.Pod{pod})
kubelet.podManager.SetPods([]*v1.Pod{pod})
// Fake node status update
go simulateVolumeInUseUpdate(
api.UniqueVolumeName("fake/vol1"),
v1.UniqueVolumeName("fake/vol1"),
stopCh,
kubelet.volumeManager)
@ -367,7 +367,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
// Remove pod
kubelet.podManager.SetPods([]*api.Pod{})
kubelet.podManager.SetPods([]*v1.Pod{})
assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod))
@ -382,7 +382,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))
// Verify volumes detached and no longer reported as in use
assert.NoError(t, waitForVolumeDetach(api.UniqueVolumeName("fake/vol1"), kubelet.volumeManager))
assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/vol1"), kubelet.volumeManager))
assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin))
}

View File

@ -15,5 +15,5 @@ limitations under the License.
*/
// Package kuberuntime contains an implementation of kubecontainer.Runtime using
// the interface in pkg/kubelet/api.
// the interface in pkg/kubelet/api.
package kuberuntime

View File

@ -22,7 +22,7 @@ import (
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
@ -49,7 +49,7 @@ func (f *fakeHTTP) Get(url string) (*http.Response, error) {
// fakeRuntimeHelper implements kubecontainer.RuntimeHelper interfaces for testing purposes.
type fakeRuntimeHelper struct{}
func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container *api.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (*kubecontainer.RunContainerOptions, error) {
var opts kubecontainer.RunContainerOptions
if len(container.TerminationMessagePath) != 0 {
testPodContainerDir, err := ioutil.TempDir("", "fooPodContainerDir")
@ -61,12 +61,12 @@ func (f *fakeRuntimeHelper) GenerateRunContainerOptions(pod *api.Pod, container
return &opts, nil
}
func (f *fakeRuntimeHelper) GetClusterDNS(pod *api.Pod) ([]string, []string, error) {
func (f *fakeRuntimeHelper) GetClusterDNS(pod *v1.Pod) ([]string, []string, error) {
return nil, nil, nil
}
// This is not used by docker runtime.
func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *api.Pod) (string, string, error) {
func (f *fakeRuntimeHelper) GeneratePodHostNameAndDomain(pod *v1.Pod) (string, string, error) {
return "", "", nil
}
@ -74,19 +74,19 @@ func (f *fakeRuntimeHelper) GetPodDir(kubetypes.UID) string {
return ""
}
func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 {
func (f *fakeRuntimeHelper) GetExtraSupplementalGroupsForPod(pod *v1.Pod) []int64 {
return nil
}
type fakePodGetter struct {
pods map[types.UID]*api.Pod
pods map[types.UID]*v1.Pod
}
func newFakePodGetter() *fakePodGetter {
return &fakePodGetter{make(map[types.UID]*api.Pod)}
return &fakePodGetter{make(map[types.UID]*v1.Pod)}
}
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*api.Pod, bool) {
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
pod, found := f.pods[uid]
return pod, found
}

View File

@ -22,7 +22,7 @@ import (
"strconv"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/types"
@ -85,12 +85,12 @@ func toKubeContainerState(state runtimeApi.ContainerState) kubecontainer.Contain
return kubecontainer.ContainerStateUnknown
}
// toRuntimeProtocol converts api.Protocol to runtimeApi.Protocol.
func toRuntimeProtocol(protocol api.Protocol) runtimeApi.Protocol {
// toRuntimeProtocol converts v1.Protocol to runtimeApi.Protocol.
func toRuntimeProtocol(protocol v1.Protocol) runtimeApi.Protocol {
switch protocol {
case api.ProtocolTCP:
case v1.ProtocolTCP:
return runtimeApi.Protocol_TCP
case api.ProtocolUDP:
case v1.ProtocolUDP:
return runtimeApi.Protocol_UDP
}
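A hypothetical same-package table test for the conversion above, using only the constants shown in this hunk; the TCP/UDP mapping is the only behavior exercised.

package kuberuntime

import (
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
)

// TestToRuntimeProtocol checks the v1.Protocol to runtimeApi.Protocol mapping.
func TestToRuntimeProtocol(t *testing.T) {
	cases := map[v1.Protocol]runtimeApi.Protocol{
		v1.ProtocolTCP: runtimeApi.Protocol_TCP,
		v1.ProtocolUDP: runtimeApi.Protocol_UDP,
	}
	for in, want := range cases {
		if got := toRuntimeProtocol(in); got != want {
			t.Errorf("toRuntimeProtocol(%v) = %v, want %v", in, got, want)
		}
	}
}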
@ -131,7 +131,7 @@ func (m *kubeGenericRuntimeManager) sandboxToKubeContainer(s *runtimeApi.PodSand
}
// getContainerSpec gets the container spec by containerName.
func getContainerSpec(pod *api.Pod, containerName string) *api.Container {
func getContainerSpec(pod *v1.Pod, containerName string) *v1.Container {
for i, c := range pod.Spec.Containers {
if containerName == c.Name {
return &pod.Spec.Containers[i]
@ -217,7 +217,7 @@ func milliCPUToQuota(milliCPU int64) (quota int64, period int64) {
// getStableKey generates a key (string) to uniquely identify a
// (pod, container) tuple. The key should include the content of the
// container, so that any change to the container generates a new key.
func getStableKey(pod *api.Pod, container *api.Container) string {
func getStableKey(pod *v1.Pod, container *v1.Container) string {
hash := strconv.FormatUint(kubecontainer.HashContainer(container), 16)
return fmt.Sprintf("%s_%s_%s_%s_%s", pod.Name, pod.Namespace, string(pod.UID), container.Name, hash)
}
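A same-package sketch of what the key above looks like; the hash suffix comes from kubecontainer.HashContainer, so any change to the container spec changes the key. The helper and the printed value are illustrative only.

package kuberuntime

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// printStableKey illustrates the key layout:
// <podName>_<podNamespace>_<podUID>_<containerName>_<containerHash>.
func printStableKey() {
	pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "web", Namespace: "default", UID: "1234"}}
	container := &v1.Container{Name: "app", Image: "busybox"}
	fmt.Println(getStableKey(pod, container)) // e.g. web_default_1234_app_<hash>
}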

View File

@ -20,22 +20,22 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
func TestStableKey(t *testing.T) {
container := &api.Container{
container := &v1.Container{
Name: "test_container",
Image: "foo/image:v1",
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
},
Spec: api.PodSpec{
Containers: []api.Container{*container},
Spec: v1.PodSpec{
Containers: []v1.Container{*container},
},
}
oldKey := getStableKey(pod, container)

View File

@ -29,8 +29,8 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
@ -49,7 +49,7 @@ import (
// * create the container
// * start the container
// * run the post start lifecycle hooks (if applicable)
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeApi.PodSandboxConfig, container *api.Container, pod *api.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, podIP string) (string, error) {
func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandboxConfig *runtimeApi.PodSandboxConfig, container *v1.Container, pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, podIP string) (string, error) {
// Step 1: pull the image.
err, msg := m.imagePuller.EnsureImageExists(pod, container, pullSecrets)
if err != nil {
@ -72,15 +72,15 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
containerConfig, err := m.generateContainerConfig(container, pod, restartCount, podIP)
if err != nil {
m.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err)
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err)
return "Generate Container Config Failed", err
}
containerID, err := m.runtimeService.CreateContainer(podSandboxID, containerConfig, podSandboxConfig)
if err != nil {
m.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err)
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToCreateContainer, "Failed to create container with error: %v", err)
return "Create Container Failed", err
}
m.recorder.Eventf(ref, api.EventTypeNormal, events.CreatedContainer, "Created container with id %v", containerID)
m.recorder.Eventf(ref, v1.EventTypeNormal, events.CreatedContainer, "Created container with id %v", containerID)
if ref != nil {
m.containerRefManager.SetRef(kubecontainer.ContainerID{
Type: m.runtimeName,
@ -91,11 +91,11 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
// Step 3: start the container.
err = m.runtimeService.StartContainer(containerID)
if err != nil {
m.recorder.Eventf(ref, api.EventTypeWarning, events.FailedToStartContainer,
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedToStartContainer,
"Failed to start container with id %v with error: %v", containerID, err)
return "Start Container Failed", err
}
m.recorder.Eventf(ref, api.EventTypeNormal, events.StartedContainer, "Started container with id %v", containerID)
m.recorder.Eventf(ref, v1.EventTypeNormal, events.StartedContainer, "Started container with id %v", containerID)
// Symlink container logs to the legacy container log location for cluster logging
// support.
@ -119,7 +119,7 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
msg, handlerErr := m.runner.Run(kubeContainerID, pod, container, container.Lifecycle.PostStart)
if handlerErr != nil {
err := fmt.Errorf("PostStart handler: %v", handlerErr)
m.generateContainerEvent(kubeContainerID, api.EventTypeWarning, events.FailedPostStartHook, msg)
m.generateContainerEvent(kubeContainerID, v1.EventTypeWarning, events.FailedPostStartHook, msg)
m.killContainer(pod, kubeContainerID, container.Name, "FailedPostStartHook", nil)
return "PostStart Hook Failed", err
}
@ -128,8 +128,8 @@ func (m *kubeGenericRuntimeManager) startContainer(podSandboxID string, podSandb
return "", nil
}
// generateContainerConfig generates container config for kubelet runtime api.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *api.Container, pod *api.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) {
// generateContainerConfig generates container config for the kubelet runtime API.
func (m *kubeGenericRuntimeManager) generateContainerConfig(container *v1.Container, pod *v1.Pod, restartCount int, podIP string) (*runtimeApi.ContainerConfig, error) {
opts, err := m.runtimeHelper.GenerateRunContainerOptions(pod, container, podIP)
if err != nil {
return nil, err
@ -185,8 +185,8 @@ func (m *kubeGenericRuntimeManager) generateContainerConfig(container *api.Conta
return config, nil
}
// generateLinuxContainerConfig generates linux container config for kubelet runtime api.
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *api.Container, pod *api.Pod, uid *int64, username *string) *runtimeApi.LinuxContainerConfig {
// generateLinuxContainerConfig generates linux container config for the kubelet runtime API.
func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *v1.Container, pod *v1.Pod, uid *int64, username *string) *runtimeApi.LinuxContainerConfig {
lc := &runtimeApi.LinuxContainerConfig{
Resources: &runtimeApi.LinuxContainerResources{},
SecurityContext: m.determineEffectiveSecurityContext(pod, container, uid, username),
@ -228,7 +228,7 @@ func (m *kubeGenericRuntimeManager) generateLinuxContainerConfig(container *api.
return lc
}
// makeDevices generates container devices for kubelet runtime api.
// makeDevices generates container devices for the kubelet runtime API.
func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device {
devices := make([]*runtimeApi.Device, len(opts.Devices))
@ -244,8 +244,8 @@ func makeDevices(opts *kubecontainer.RunContainerOptions) []*runtimeApi.Device {
return devices
}
// makeMounts generates container volume mounts for kubelet runtime api.
func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *api.Container) []*runtimeApi.Mount {
// makeMounts generates container volume mounts for the kubelet runtime API.
func (m *kubeGenericRuntimeManager) makeMounts(opts *kubecontainer.RunContainerOptions, container *v1.Container) []*runtimeApi.Mount {
volumeMounts := []*runtimeApi.Mount{}
for idx := range opts.Mounts {
@ -416,7 +416,7 @@ func (m *kubeGenericRuntimeManager) generateContainerEvent(containerID kubeconta
}
// executePreStopHook runs the pre-stop lifecycle hooks if applicable and returns the duration it takes.
func (m *kubeGenericRuntimeManager) executePreStopHook(pod *api.Pod, containerID kubecontainer.ContainerID, containerSpec *api.Container, gracePeriod int64) int64 {
func (m *kubeGenericRuntimeManager) executePreStopHook(pod *v1.Pod, containerID kubecontainer.ContainerID, containerSpec *v1.Container, gracePeriod int64) int64 {
glog.V(3).Infof("Running preStop hook for container %q", containerID.String())
start := unversioned.Now()
@ -426,7 +426,7 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *api.Pod, containerID
defer utilruntime.HandleCrash()
if msg, err := m.runner.Run(containerID, pod, containerSpec, containerSpec.Lifecycle.PreStop); err != nil {
glog.Errorf("preStop hook for container %q failed: %v", containerSpec.Name, err)
m.generateContainerEvent(containerID, api.EventTypeWarning, events.FailedPreStopHook, msg)
m.generateContainerEvent(containerID, v1.EventTypeWarning, events.FailedPreStopHook, msg)
}
}()
@ -448,9 +448,9 @@ func (m *kubeGenericRuntimeManager) executePreStopHook(pod *api.Pod, containerID
// TODO(random-liu): Add a node e2e test to test this behaviour.
// TODO(random-liu): Change the lifecycle handler to just accept information needed, so that we can
// just pass the needed function not create the fake object.
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*api.Pod, *api.Container, error) {
var pod *api.Pod
var container *api.Container
func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID kubecontainer.ContainerID) (*v1.Pod, *v1.Container, error) {
var pod *v1.Pod
var container *v1.Container
s, err := m.runtimeService.ContainerStatus(containerID.ID)
if err != nil {
return nil, nil, err
@ -460,24 +460,24 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
a := getContainerInfoFromAnnotations(s.Annotations)
// Notice that the following is not a full spec. The container killing code should not use
// un-restored fields.
pod = &api.Pod{
ObjectMeta: api.ObjectMeta{
pod = &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: l.PodUID,
Name: l.PodName,
Namespace: l.PodNamespace,
DeletionGracePeriodSeconds: a.PodDeletionGracePeriod,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: a.PodTerminationGracePeriod,
},
}
container = &api.Container{
container = &v1.Container{
Name: l.ContainerName,
Ports: a.ContainerPorts,
TerminationMessagePath: a.TerminationMessagePath,
}
if a.PreStopHandler != nil {
container.Lifecycle = &api.Lifecycle{
container.Lifecycle = &v1.Lifecycle{
PreStop: a.PreStopHandler,
}
}
@ -487,8 +487,8 @@ func (m *kubeGenericRuntimeManager) restoreSpecsFromContainerLabels(containerID
// killContainer kills a container through the following steps:
// * Run the pre-stop lifecycle hooks (if applicable).
// * Stop the container.
func (m *kubeGenericRuntimeManager) killContainer(pod *api.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error {
var containerSpec *api.Container
func (m *kubeGenericRuntimeManager) killContainer(pod *v1.Pod, containerID kubecontainer.ContainerID, containerName string, reason string, gracePeriodOverride *int64) error {
var containerSpec *v1.Container
if pod != nil {
containerSpec = getContainerSpec(pod, containerName)
} else {
@ -534,14 +534,14 @@ func (m *kubeGenericRuntimeManager) killContainer(pod *api.Pod, containerID kube
if reason != "" {
message = fmt.Sprint(message, ":", reason)
}
m.generateContainerEvent(containerID, api.EventTypeNormal, events.KillingContainer, message)
m.generateContainerEvent(containerID, v1.EventTypeNormal, events.KillingContainer, message)
m.containerRefManager.ClearRef(containerID)
return err
}
// killContainersWithSyncResult kills all pod's containers with sync results.
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (syncResults []*kubecontainer.SyncResult) {
containerResults := make(chan *kubecontainer.SyncResult, len(runningPod.Containers))
wg := sync.WaitGroup{}
@ -570,7 +570,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(pod *api.Pod, r
// pruneInitContainers ensures that before we begin creating init containers, we have reduced the number
// of outstanding init containers still present. This reduces load on the container garbage collector
// by only preserving the most recent terminated init container.
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) {
func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *v1.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) {
// only the last execution of each init container should be preserved, and only preserve it if it is in the
// list of init containers to keep.
initContainerNames := sets.NewString()
@ -614,7 +614,7 @@ func (m *kubeGenericRuntimeManager) pruneInitContainersBeforeStart(pod *api.Pod,
// next init container to start, or done if there are no further init containers.
// Status is only returned if an init container is failed, in which case next will
// point to the current container.
func findNextInitContainerToRun(pod *api.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *api.Container, done bool) {
func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.ContainerStatus, next *v1.Container, done bool) {
if len(pod.Spec.InitContainers) == 0 {
return nil, nil, true
}
@ -656,7 +656,7 @@ func findNextInitContainerToRun(pod *api.Pod, podStatus *kubecontainer.PodStatus
}
// GetContainerLogs returns logs of a specific container.
func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *api.Pod, containerID kubecontainer.ContainerID, logOptions *api.PodLogOptions, stdout, stderr io.Writer) (err error) {
func (m *kubeGenericRuntimeManager) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) (err error) {
status, err := m.runtimeService.ContainerStatus(containerID.ID)
if err != nil {
return fmt.Errorf("failed to get container status %q: %v", containerID, err)

View File

@ -21,7 +21,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
@ -29,18 +29,18 @@ import (
// TestRemoveContainer tests removing the container and its corresponding container logs.
func TestRemoveContainer(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
@ -34,12 +34,12 @@ func TestSandboxGC(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pods := []*api.Pod{
makeTestPod("foo1", "new", "1234", []api.Container{
pods := []*v1.Pod{
makeTestPod("foo1", "new", "1234", []v1.Container{
makeTestContainer("bar1", "busybox"),
makeTestContainer("bar2", "busybox"),
}),
makeTestPod("foo2", "new", "5678", []api.Container{
makeTestPod("foo2", "new", "5678", []v1.Container{
makeTestContainer("bar3", "busybox"),
}),
}
@ -129,7 +129,7 @@ func TestContainerGC(t *testing.T) {
fakePodGetter := m.containerGC.podGetter.(*fakePodGetter)
makeGCContainer := func(podName, containerName string, attempt int, createdAt int64, state runtimeApi.ContainerState) containerTemplate {
container := makeTestContainer(containerName, "test-image")
pod := makeTestPod(podName, "test-ns", podName, []api.Container{container})
pod := makeTestPod(podName, "test-ns", podName, []v1.Container{container})
if podName != "deleted" {
// initialize the pod getter, explicitly exclude deleted pod
fakePodGetter.pods[pod.UID] = pod

View File

@ -18,7 +18,7 @@ package kuberuntime
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/credentialprovider"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -28,7 +28,7 @@ import (
// PullImage pulls an image from the network to local storage using the supplied
// secrets if necessary.
func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []api.Secret) error {
func (m *kubeGenericRuntimeManager) PullImage(image kubecontainer.ImageSpec, pullSecrets []v1.Secret) error {
img := image.Image
repoToPull, _, _, err := parsers.ParseImageName(img)
if err != nil {
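The remainder of PullImage (cut off by this hunk) resolves registry credentials from the supplied secrets and falls back to an anonymous pull when nothing matches. A simplified, self-contained sketch of that selection loop; the keyring shape and fallback behaviour are assumptions that mirror the usual kubelet approach rather than the exact code:

package main

import (
	"errors"
	"fmt"
	"strings"
)

type authConfig struct{ Username, Password string }

// pullWithCredentials tries each credential whose registry prefix matches the
// image; if none match, it attempts a single anonymous pull.
func pullWithCredentials(image string, creds map[string]authConfig,
	pull func(image string, auth *authConfig) error) error {

	var lastErr error
	matched := false
	for registry, auth := range creds {
		if !strings.HasPrefix(image, registry) {
			continue
		}
		matched = true
		if err := pull(image, &auth); err != nil {
			lastErr = err
			continue
		}
		return nil
	}
	if !matched {
		return pull(image, nil) // anonymous pull
	}
	return lastErr
}

func main() {
	creds := map[string]authConfig{"registry.example.com/": {Username: "u", Password: "p"}}
	err := pullWithCredentials("busybox", creds, func(image string, auth *authConfig) error {
		if auth == nil {
			fmt.Println("anonymous pull of", image)
			return nil
		}
		return errors.New("unreachable in this example")
	})
	fmt.Println("err:", err)
}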

View File

@ -31,7 +31,7 @@ import (
"github.com/fsnotify/fsnotify"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
// Notice that the current kuberuntime logs implementation doesn't handle
@ -85,8 +85,8 @@ type logOptions struct {
timestamp bool
}
// newLogOptions convert the api.PodLogOptions to internal logOptions.
func newLogOptions(apiOpts *api.PodLogOptions, now time.Time) *logOptions {
// newLogOptions converts the v1.PodLogOptions to internal logOptions.
func newLogOptions(apiOpts *v1.PodLogOptions, now time.Time) *logOptions {
opts := &logOptions{
tail: -1, // -1 by default which means read all logs.
bytes: -1, // -1 by default which means read all logs.
@ -109,14 +109,14 @@ func newLogOptions(apiOpts *api.PodLogOptions, now time.Time) *logOptions {
}
// ReadLogs reads the container log and redirects it into stdout and stderr.
func ReadLogs(path string, apiOpts *api.PodLogOptions, stdout, stderr io.Writer) error {
func ReadLogs(path string, apiOpts *v1.PodLogOptions, stdout, stderr io.Writer) error {
f, err := os.Open(path)
if err != nil {
return fmt.Errorf("failed to open log file %q: %v", path, err)
}
defer f.Close()
// Convert api.PodLogOptions into internal log options.
// Convert v1.PodLogOptions into internal log options.
opts := newLogOptions(apiOpts, time.Now())
// Search start point based on tail line.
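The conversion done by newLogOptions is small enough to restate on its own. The sketch below reproduces the mapping exercised by the tests in the next file (tail and bytes default to -1, SinceSeconds becomes an absolute time relative to now); the types are local stand-ins for v1.PodLogOptions and logOptions, and the precedence between SinceTime and SinceSeconds is an assumption:

package main

import (
	"fmt"
	"time"
)

// podLogOptions is a stand-in for the fields of v1.PodLogOptions used here.
type podLogOptions struct {
	TailLines    *int64
	LimitBytes   *int64
	SinceSeconds *int64
	SinceTime    *time.Time
	Timestamps   bool
}

type logOptions struct {
	tail, bytes int64
	since       time.Time
	timestamp   bool
}

// newLogOptions converts API-level log options into the internal form:
// -1 means "no limit" for both tail and bytes, and the two "since" variants
// collapse into a single absolute timestamp.
func newLogOptions(apiOpts *podLogOptions, now time.Time) *logOptions {
	opts := &logOptions{tail: -1, bytes: -1, timestamp: apiOpts.Timestamps}
	if apiOpts.TailLines != nil {
		opts.tail = *apiOpts.TailLines
	}
	if apiOpts.LimitBytes != nil {
		opts.bytes = *apiOpts.LimitBytes
	}
	if apiOpts.SinceSeconds != nil {
		opts.since = now.Add(-time.Duration(*apiOpts.SinceSeconds) * time.Second)
	}
	if apiOpts.SinceTime != nil && apiOpts.SinceTime.After(opts.since) {
		opts.since = *apiOpts.SinceTime
	}
	return opts
}

func main() {
	lines := int64(100)
	opts := newLogOptions(&podLogOptions{TailLines: &lines}, time.Now())
	fmt.Printf("%+v\n", opts)
}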

View File

@ -24,8 +24,8 @@ import (
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
)
func TestLogOptions(t *testing.T) {
@ -36,27 +36,27 @@ func TestLogOptions(t *testing.T) {
sinceseconds = int64(10)
)
for c, test := range []struct {
apiOpts *api.PodLogOptions
apiOpts *v1.PodLogOptions
expect *logOptions
}{
{ // empty options
apiOpts: &api.PodLogOptions{},
apiOpts: &v1.PodLogOptions{},
expect: &logOptions{tail: -1, bytes: -1},
},
{ // test tail lines
apiOpts: &api.PodLogOptions{TailLines: &line},
apiOpts: &v1.PodLogOptions{TailLines: &line},
expect: &logOptions{tail: line, bytes: -1},
},
{ // test limit bytes
apiOpts: &api.PodLogOptions{LimitBytes: &bytes},
apiOpts: &v1.PodLogOptions{LimitBytes: &bytes},
expect: &logOptions{tail: -1, bytes: bytes},
},
{ // test since timestamp
apiOpts: &api.PodLogOptions{SinceTime: &timestamp},
apiOpts: &v1.PodLogOptions{SinceTime: &timestamp},
expect: &logOptions{tail: -1, bytes: -1, since: timestamp.Time},
},
{ // test since seconds
apiOpts: &api.PodLogOptions{SinceSeconds: &sinceseconds},
apiOpts: &v1.PodLogOptions{SinceSeconds: &sinceseconds},
expect: &logOptions{tail: -1, bytes: -1, since: timestamp.Add(-10 * time.Second)},
},
} {

View File

@ -26,7 +26,7 @@ import (
"github.com/golang/glog"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/credentialprovider"
internalApi "k8s.io/kubernetes/pkg/kubelet/api"
@ -64,7 +64,7 @@ var (
// A subset of the pod.Manager interface extracted for garbage collection purposes.
type podGetter interface {
GetPodByUID(kubetypes.UID) (*api.Pod, bool)
GetPodByUID(kubetypes.UID) (*v1.Pod, bool)
}
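podGetter is the one-method seam the container garbage collector needs from the pod manager, which is what makes the map-backed fakePodGetter in the GC tests possible. A minimal sketch of such a fake, with types simplified to plain strings:

package main

import "fmt"

// podGetter is the narrow interface the garbage collector depends on.
type podGetter interface {
	GetPodByUID(uid string) (name string, found bool)
}

// fakePodGetter is a map-backed test double, in the spirit of the
// fakePodGetter used by the container GC tests.
type fakePodGetter struct {
	pods map[string]string // uid -> pod name
}

func (f *fakePodGetter) GetPodByUID(uid string) (string, bool) {
	name, ok := f.pods[uid]
	return name, ok
}

func main() {
	var g podGetter = &fakePodGetter{pods: map[string]string{"1234": "foo1"}}
	if name, ok := g.GetPodByUID("1234"); ok {
		fmt.Println("pod still exists:", name)
	}
	if _, ok := g.GetPodByUID("deleted"); !ok {
		fmt.Println("pod gone; its containers are GC candidates")
	}
}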
type kubeGenericRuntimeManager struct {
@ -349,7 +349,7 @@ func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, err
// containerToKillInfo contains necessary information to kill a container.
type containerToKillInfo struct {
// The spec of the container.
container *api.Container
container *v1.Container
// The name of the container.
name string
// The message indicates why the container will be killed.
@ -388,7 +388,7 @@ type podContainerSpecChanges struct {
// podSandboxChanged checks whether the pod spec has changed and returns
// (changed, new attempt, original sandboxID if it exists).
func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *api.Pod, podStatus *kubecontainer.PodStatus) (changed bool, attempt uint32, sandboxID string) {
func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (changed bool, attempt uint32, sandboxID string) {
if len(podStatus.SandboxStatuses) == 0 {
glog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
return true, 0, ""
@ -420,7 +420,7 @@ func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *api.Pod, podStatus *k
// checkAndKeepInitContainers keeps all successfully completed init containers. If there
// are failing containers, only keep the first failing one.
func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool {
func checkAndKeepInitContainers(pod *v1.Pod, podStatus *kubecontainer.PodStatus, initContainersToKeep map[kubecontainer.ContainerID]int) bool {
initFailed := false
for i, container := range pod.Spec.InitContainers {
@ -448,7 +448,7 @@ func checkAndKeepInitContainers(pod *api.Pod, podStatus *kubecontainer.PodStatus
}
// computePodContainerChanges checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podContainerSpecChanges {
glog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
sandboxChanged, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
@ -484,7 +484,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
continue
}
if sandboxChanged {
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
message := fmt.Sprintf("Container %+v's pod sandbox is dead, the container will be recreated.", container)
glog.Info(message)
changes.ContainersToStart[index] = message
@ -496,7 +496,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
// Initialization failed and Container exists.
// If we have an initialization failure everything will be killed anyway.
// If RestartPolicy is Always or OnFailure we restart containers that were running before.
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
message := fmt.Sprintf("Failed to initialize pod. %q will be restarted.", container.Name)
glog.V(1).Info(message)
changes.ContainersToStart[index] = message
@ -519,7 +519,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
changes.ContainersToKeep[containerStatus.ID] = index
continue
}
if pod.Spec.RestartPolicy != api.RestartPolicyNever {
if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
message := fmt.Sprintf("pod %q container %q is unhealthy, it will be killed and re-created.", format.Pod(pod), container.Name)
glog.Info(message)
changes.ContainersToStart[index] = message
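The three RestartPolicy checks in this function reduce to one question: given how the container last finished, does the policy allow starting it again? A compact restatement of the usual Always/OnFailure/Never semantics (a sketch, not the exact helper the kubelet uses):

package main

import "fmt"

type restartPolicy string

const (
	restartAlways    restartPolicy = "Always"
	restartOnFailure restartPolicy = "OnFailure"
	restartNever     restartPolicy = "Never"
)

// shouldRestart answers whether an exited container may be started again under
// the given policy: Always restarts unconditionally, OnFailure only restarts
// non-zero exits, and Never never restarts.
func shouldRestart(policy restartPolicy, exitCode int) bool {
	switch policy {
	case restartAlways:
		return true
	case restartOnFailure:
		return exitCode != 0
	default: // restartNever
		return false
	}
}

func main() {
	fmt.Println(shouldRestart(restartOnFailure, 0)) // false: completed successfully
	fmt.Println(shouldRestart(restartOnFailure, 1)) // true: failed, so retry
	fmt.Println(shouldRestart(restartNever, 1))     // false: never restart
}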
@ -537,7 +537,7 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
_, keep := changes.ContainersToKeep[containerStatus.ID]
_, keepInit := changes.InitContainersToKeep[containerStatus.ID]
if !keep && !keepInit {
var podContainer *api.Container
var podContainer *v1.Container
var killMessage string
for i, c := range pod.Spec.Containers {
if c.Name == containerStatus.Name {
@ -566,19 +566,19 @@ func (m *kubeGenericRuntimeManager) computePodContainerChanges(pod *api.Pod, pod
// 4. Create sandbox if necessary.
// 5. Create init containers.
// 6. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, _ v1.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
// Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodContainerChanges(pod, podStatus)
glog.V(3).Infof("computePodContainerChanges got %+v for pod %q", podContainerChanges, format.Pod(pod))
if podContainerChanges.CreateSandbox {
ref, err := api.GetReference(pod)
ref, err := v1.GetReference(pod)
if err != nil {
glog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
}
if podContainerChanges.SandboxID != "" {
m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.")
m.recorder.Eventf(ref, v1.EventTypeNormal, "SandboxChanged", "Pod sandbox changed, it will be killed and re-created.")
} else {
m.recorder.Eventf(ref, api.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.")
m.recorder.Eventf(ref, v1.EventTypeNormal, "SandboxReceived", "Pod sandbox received, it will be created.")
}
}
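The numbered steps above are easiest to read as a straight-line pipeline. The sketch below compresses SyncPod into that shape with stubbed-out steps; it illustrates the ordering only and invents its own minimal types rather than using the real signatures:

package main

import "fmt"

type changes struct {
	CreateSandbox     bool
	ContainersToKill  []string
	ContainersToStart []string
	InitContainerNext string
}

// syncPodSketch shows the ordering SyncPod follows: compute changes, kill what
// must go, (re)create the sandbox, run the next init container, then start the
// remaining app containers.
func syncPodSketch(c changes) {
	for _, name := range c.ContainersToKill {
		fmt.Println("kill container", name) // steps 2/3: kill the pod or individual containers
	}
	if c.CreateSandbox {
		fmt.Println("create pod sandbox") // step 4
	}
	if c.InitContainerNext != "" {
		fmt.Println("start init container", c.InitContainerNext) // step 5
		return // app containers wait until init containers have finished
	}
	for _, name := range c.ContainersToStart {
		fmt.Println("start container", name) // step 6
	}
}

func main() {
	syncPodSketch(changes{
		CreateSandbox:     true,
		ContainersToKill:  []string{"old-foo1"},
		InitContainerNext: "init1",
	})
}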
@ -674,7 +674,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt
initContainerResult := kubecontainer.NewSyncResult(kubecontainer.InitContainer, status.Name)
initContainerResult.Fail(kubecontainer.ErrRunInitContainer, fmt.Sprintf("init container %q exited with %d", status.Name, status.ExitCode))
result.AddSyncResult(initContainerResult)
if pod.Spec.RestartPolicy == api.RestartPolicyNever {
if pod.Spec.RestartPolicy == v1.RestartPolicyNever {
utilruntime.HandleError(fmt.Errorf("error running pod %q init container %q, restart=Never: %#v", format.Pod(pod), status.Name, status))
return
}
@ -745,7 +745,7 @@ func (m *kubeGenericRuntimeManager) SyncPod(pod *api.Pod, _ api.PodStatus, podSt
// If a container is still in backoff, the function will return a brief backoff error and
// a detailed error message.
func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
var cStatus *kubecontainer.ContainerStatus
for _, c := range podStatus.ContainerStatuses {
if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
@ -765,7 +765,7 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Conta
key := getStableKey(pod, container)
if backOff.IsInBackOffSince(key, ts) {
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
m.recorder.Eventf(ref, api.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
glog.Infof("%s", err.Error())
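doBackOff keys the backoff window on a stable (pod, container) key and the container's last exit time. A self-contained sketch of that check using an in-memory map instead of flowcontrol.Backoff; the key format and durations are illustrative, and the real Backoff type has slightly different semantics:

package main

import (
	"fmt"
	"time"
)

type backoffEntry struct {
	lastUpdate time.Time
	delay      time.Duration
}

// inBackoff reports whether a container that exited at finishedAt is still
// inside its current backoff window, keyed by a stable pod/container key.
func inBackoff(table map[string]backoffEntry, key string, finishedAt, now time.Time) (bool, time.Duration) {
	entry, ok := table[key]
	if !ok {
		return false, 0
	}
	// Only count the window if the backoff entry was refreshed after the
	// container's last exit; otherwise it belongs to an older failure.
	if entry.lastUpdate.Before(finishedAt) {
		return false, 0
	}
	remaining := entry.delay - now.Sub(entry.lastUpdate)
	return remaining > 0, remaining
}

func main() {
	now := time.Now()
	table := map[string]backoffEntry{
		"default_web_nginx": {lastUpdate: now.Add(-5 * time.Second), delay: 20 * time.Second},
	}
	ok, remaining := inBackoff(table, "default_web_nginx", now.Add(-10*time.Second), now)
	fmt.Println("still in backoff:", ok, "remaining:", remaining.Round(time.Second))
}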
@ -780,14 +780,14 @@ func (m *kubeGenericRuntimeManager) doBackOff(pod *api.Pod, container *api.Conta
// gracePeriodOverride, if specified, allows the caller to override the pod's default grace period.
// Only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet, so that user data is not corrupted.
// It is useful when issuing SIGKILL for hard eviction, or enforcing the max grace period during soft eviction.
func (m *kubeGenericRuntimeManager) KillPod(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
return err.Error()
}
// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *api.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride)
for _, containerResult := range killContainerResults {
result.AddSyncResult(containerResult)
@ -808,7 +808,7 @@ func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *api.Pod, runningP
}
// isHostNetwork checks whether the pod is running in host-network mode.
func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *api.Pod) (bool, error) {
func (m *kubeGenericRuntimeManager) isHostNetwork(podSandBoxID string, pod *v1.Pod) (bool, error) {
if pod != nil {
return kubecontainer.IsHostNetworkPod(pod), nil
}
@ -848,8 +848,8 @@ func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namesp
return nil, err
}
podFullName := format.Pod(&api.Pod{
ObjectMeta: api.ObjectMeta{
podFullName := format.Pod(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
UID: uid,

View File

@ -24,7 +24,7 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
apitest "k8s.io/kubernetes/pkg/kubelet/api/testing"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
@ -63,7 +63,7 @@ func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImage
// sandboxTemplate is a sandbox template used to create a fake sandbox.
type sandboxTemplate struct {
pod *api.Pod
pod *v1.Pod
attempt uint32
createdAt int64
state runtimeApi.PodSandboxState
@ -71,8 +71,8 @@ type sandboxTemplate struct {
// containerTemplate is a container template used to create a fake container.
type containerTemplate struct {
pod *api.Pod
container *api.Container
pod *v1.Pod
container *v1.Container
sandboxAttempt uint32
attempt int
createdAt int64
@ -82,7 +82,7 @@ type containerTemplate struct {
// makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and
// one fake container for each of its containers.
func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService,
pod *api.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) {
pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) {
sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
pod: pod,
createdAt: fakeCreatedAt,
@ -90,7 +90,7 @@ func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *
})
var containers []*apitest.FakeContainer
newTemplate := func(c *api.Container) containerTemplate {
newTemplate := func(c *v1.Container) containerTemplate {
return containerTemplate{
pod: pod,
container: c,
@ -177,22 +177,22 @@ func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates []
}
// makeTestContainer creates a test api container.
func makeTestContainer(name, image string) api.Container {
return api.Container{
func makeTestContainer(name, image string) v1.Container {
return v1.Container{
Name: name,
Image: image,
}
}
// makeTestPod creates a test api pod.
func makeTestPod(podName, podNamespace, podUID string, containers []api.Container) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod {
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: types.UID(podUID),
Name: podName,
Namespace: podNamespace,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Containers: containers,
},
}
@ -256,25 +256,25 @@ func TestGetPodStatus(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
containers := []api.Container{
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Containers: containers,
},
}
@ -294,14 +294,14 @@ func TestGetPods(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
@ -370,14 +370,14 @@ func TestGetPodContainerID(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
@ -417,14 +417,14 @@ func TestGetNetNS(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
@ -449,14 +449,14 @@ func TestKillPod(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "foo1",
Image: "busybox",
@ -520,31 +520,31 @@ func TestSyncPod(t *testing.T) {
fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
assert.NoError(t, err)
containers := []api.Container{
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Containers: containers,
},
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(pod, api.PodStatus{}, &kubecontainer.PodStatus{}, []api.Secret{}, backOff)
result := m.SyncPod(pod, v1.PodStatus{}, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 2, len(fakeImage.Images))
@ -563,14 +563,14 @@ func TestPruneInitContainers(t *testing.T) {
init1 := makeTestContainer("init1", "busybox")
init2 := makeTestContainer("init2", "busybox")
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
InitContainers: []api.Container{init1, init2},
Spec: v1.PodSpec{
InitContainers: []v1.Container{init1, init2},
},
}
@ -598,32 +598,32 @@ func TestSyncPodWithInitContainers(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
initContainers := []api.Container{
initContainers := []v1.Container{
{
Name: "init1",
Image: "init",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
}
containers := []api.Container{
containers := []v1.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: api.PullIfNotPresent,
ImagePullPolicy: v1.PullIfNotPresent,
},
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Containers: containers,
InitContainers: initContainers,
},
@ -631,7 +631,7 @@ func TestSyncPodWithInitContainers(t *testing.T) {
// buildContainerID is an internal helper function to build a container id from the api pod
// and container, with a default attempt number of 0.
buildContainerID := func(pod *api.Pod, container api.Container) string {
buildContainerID := func(pod *v1.Pod, container v1.Container) string {
uid := string(pod.UID)
sandboxID := apitest.BuildSandboxName(&runtimeApi.PodSandboxMetadata{
Name: &pod.Name,
@ -646,7 +646,7 @@ func TestSyncPodWithInitContainers(t *testing.T) {
// 1. should only create the init container.
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result := m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
result := m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 1, len(fakeRuntime.Containers))
initContainerID := buildContainerID(pod, initContainers[0])
@ -658,7 +658,7 @@ func TestSyncPodWithInitContainers(t *testing.T) {
// 2. should not create app container because init container is still running.
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result = m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 1, len(fakeRuntime.Containers))
expectedContainers = []string{initContainerID}
@ -670,7 +670,7 @@ func TestSyncPodWithInitContainers(t *testing.T) {
fakeRuntime.StopContainer(initContainerID, 0)
podStatus, err = m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result = m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
result = m.SyncPod(pod, v1.PodStatus{}, podStatus, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 3, len(fakeRuntime.Containers))
expectedContainers = []string{initContainerID, buildContainerID(pod, containers[0]),

View File

@ -23,7 +23,7 @@ import (
"sort"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
runtimeApi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/types"
@ -32,7 +32,7 @@ import (
)
// createPodSandbox creates a pod sandbox and returns (podSandBoxID, message, error).
func (m *kubeGenericRuntimeManager) createPodSandbox(pod *api.Pod, attempt uint32) (string, string, error) {
func (m *kubeGenericRuntimeManager) createPodSandbox(pod *v1.Pod, attempt uint32) (string, string, error) {
podSandboxConfig, err := m.generatePodSandboxConfig(pod, attempt)
if err != nil {
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
@ -58,8 +58,8 @@ func (m *kubeGenericRuntimeManager) createPodSandbox(pod *api.Pod, attempt uint3
return podSandBoxID, "", nil
}
// generatePodSandboxConfig generates pod sandbox config from api.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *api.Pod, attempt uint32) (*runtimeApi.PodSandboxConfig, error) {
// generatePodSandboxConfig generates pod sandbox config from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *v1.Pod, attempt uint32) (*runtimeApi.PodSandboxConfig, error) {
// TODO: deprecate podsandbox resource requirements in favor of the pod-level cgroup
// Refer https://github.com/kubernetes/kubernetes/issues/29871
podUID := string(pod.UID)
@ -128,8 +128,8 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxConfig(pod *api.Pod, attem
return podSandboxConfig, nil
}
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from api.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *api.Pod, cgroupParent string) *runtimeApi.LinuxPodSandboxConfig {
// generatePodSandboxLinuxConfig generates LinuxPodSandboxConfig from v1.Pod.
func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *v1.Pod, cgroupParent string) *runtimeApi.LinuxPodSandboxConfig {
if pod.Spec.SecurityContext == nil && cgroupParent == "" {
return nil
}
@ -142,9 +142,9 @@ func (m *kubeGenericRuntimeManager) generatePodSandboxLinuxConfig(pod *api.Pod,
sc := pod.Spec.SecurityContext
lc.SecurityContext = &runtimeApi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeApi.NamespaceOption{
HostNetwork: &sc.HostNetwork,
HostIpc: &sc.HostIPC,
HostPid: &sc.HostPID,
HostNetwork: &pod.Spec.HostNetwork,
HostIpc: &pod.Spec.HostIPC,
HostPid: &pod.Spec.HostPID,
},
RunAsUser: sc.RunAsUser,
}
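Beyond the api-to-v1 rename, this hunk shows a real field move: in the internal API the host-namespace flags lived on pod.Spec.SecurityContext, while in v1 they sit directly on pod.Spec, so the NamespaceOption is now built from pod.Spec.HostNetwork and friends. A small standalone sketch of that construction with simplified types:

package main

import "fmt"

// podSpec carries the v1-style host-namespace flags directly on the spec.
type podSpec struct {
	HostNetwork, HostIPC, HostPID bool
}

// namespaceOption mirrors the CRI NamespaceOption shape, which takes pointers.
type namespaceOption struct {
	HostNetwork, HostIpc, HostPid *bool
}

func namespaceOptionFromSpec(spec *podSpec) *namespaceOption {
	// In v1 the flags are read straight off the PodSpec rather than its
	// SecurityContext, matching the change in the hunk above.
	return &namespaceOption{
		HostNetwork: &spec.HostNetwork,
		HostIpc:     &spec.HostIPC,
		HostPid:     &spec.HostPID,
	}
}

func main() {
	opt := namespaceOptionFromSpec(&podSpec{HostNetwork: true})
	fmt.Println("host network:", *opt.HostNetwork, "host ipc:", *opt.HostIpc, "host pid:", *opt.HostPid)
}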

Some files were not shown because too many files have changed in this diff.