commit 7eeb71f698 (parent 48536eaef9)
Author: Chao Xu
Date: 2016-11-18 12:50:17 -08:00

cmd/kube-controller-manager

109 changed files with 4380 additions and 4153 deletions
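The change applied across every file below follows one pattern: the node controller's references to the internal API package (k8s.io/kubernetes/pkg/api) become the versioned v1 types (k8s.io/kubernetes/pkg/api/v1), the generated internalclientset becomes the release_1_5 clientset, and selectors are serialized to strings where v1.ListOptions expects them. A minimal sketch of the post-migration shape, using only identifiers that appear in the hunks below (the helper name is illustrative):

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

// listAllNodes was previously written against *api.NodeList and api.ListOptions;
// after the migration it returns *v1.NodeList and passes selectors as strings.
func listAllNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
	return kubeClient.Core().Nodes().List(v1.ListOptions{
		FieldSelector: fields.Everything().String(),
		LabelSelector: labels.Everything().String(),
	})
}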


@@ -22,9 +22,9 @@ import (
"net"
"sync"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@@ -50,8 +50,8 @@ type nodeAndCIDR struct {
// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface {
AllocateOrOccupyCIDR(node *api.Node) error
ReleaseCIDR(node *api.Node) error
AllocateOrOccupyCIDR(node *v1.Node) error
ReleaseCIDR(node *v1.Node) error
}
type rangeAllocator struct {
@@ -72,9 +72,9 @@ type rangeAllocator struct {
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *api.NodeList) (CIDRAllocator, error) {
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "cidrAllocator"})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)
ra := &rangeAllocator{
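The constructor's contract spelled out above — subNetMaskSize no smaller than the cluster CIDR mask size, nodeList nil only in tests — reads more concretely as a hedged usage sketch; the CIDR literals and helper name are placeholders, and the file's imports are assumed in scope:

func newAllocatorForCluster(client clientset.Interface, nodeList *v1.NodeList) (CIDRAllocator, error) {
	// A /16 cluster CIDR carved into /24 per-node ranges; the service CIDR is
	// excluded from allocation. 24 >= 16, satisfying the mask-size requirement.
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
	_, serviceCIDR, _ := net.ParseCIDR("10.96.0.0/12")
	return NewCIDRRangeAllocator(client, clusterCIDR, serviceCIDR, 24, nodeList)
}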
@@ -145,7 +145,7 @@ func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.nodesInProcessing.Delete(nodeName)
}
func (r *rangeAllocator) occupyCIDR(node *api.Node) error {
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
@@ -164,7 +164,7 @@ func (r *rangeAllocator) occupyCIDR(node *api.Node) error {
// if it doesn't currently have one, or mark the CIDR as used if the node already has one.
// WARNING: If you're adding any return calls or defer any more work from this function
// you have to handle nodesInProcessing correctly.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error {
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
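In the node controller these two methods are paired with node lifecycle events: allocate (or re-occupy) on add, release on delete. A hedged wiring sketch using the informer handler type that appears later in this commit (helper name illustrative, imports as in the file):

func cidrEventHandlers(alloc CIDRAllocator) cache.ResourceEventHandlerFuncs {
	return cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if node, ok := obj.(*v1.Node); ok {
				// Marks an existing PodCIDR as used, or assigns a fresh one.
				_ = alloc.AllocateOrOccupyCIDR(node)
			}
		},
		DeleteFunc: func(obj interface{}) {
			if node, ok := obj.(*v1.Node); ok {
				_ = alloc.ReleaseCIDR(node)
			}
		},
	}
}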
@@ -191,7 +191,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error {
}
// ReleaseCIDR releases the CIDR of the removed node
func (r *rangeAllocator) ReleaseCIDR(node *api.Node) error {
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
@@ -225,7 +225,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *api.Node
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates.
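The update itself is retried up to podCIDRUpdateRetry times against the versioned Nodes client; a hedged sketch of the loop shape (helper name hypothetical, error handling trimmed, imports as in the file):

func updatePodCIDR(client clientset.Interface, nodeName, cidr string, retries int) error {
	var err error
	for rep := 0; rep < retries; rep++ {
		var node *v1.Node
		if node, err = client.Core().Nodes().Get(nodeName); err != nil {
			continue
		}
		node.Spec.PodCIDR = cidr
		if _, err = client.Core().Nodes().Update(node); err == nil {
			return nil
		}
	}
	return err
}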


@@ -21,8 +21,8 @@ import (
"testing"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/util/wait"
)
@@ -52,9 +52,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -72,9 +72,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{
description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -96,9 +96,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{
description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -182,9 +182,9 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -265,9 +265,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -288,9 +288,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
{
description: "Correctly recycle CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -357,8 +357,8 @@ func TestReleaseCIDRSuccess(t *testing.T) {
}
for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := api.Node{
ObjectMeta: api.ObjectMeta{
nodeToRelease := v1.Node{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
}


@@ -22,8 +22,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
@@ -46,9 +47,9 @@ const (
// if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName)
options := api.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(api.NamespaceAll).List(options)
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := v1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
var updateErrList []error
if err != nil {
@@ -56,7 +57,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
}
if len(pods.Items) > 0 {
recordNodeEvent(recorder, nodeName, nodeUID, api.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
recordNodeEvent(recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
}
for _, pod := range pods.Items {
@@ -85,7 +86,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
}
glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
return false, err
}
@@ -100,7 +101,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
// and returns an error if it encounters one.
func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeName string) (*api.Pod, error) {
func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
if pod.Status.Reason == node.NodeUnreachablePodReason {
return pod, nil
}
@@ -108,7 +109,7 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeN
pod.Status.Reason = node.NodeUnreachablePodReason
pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name)
var updatedPod *api.Pod
var updatedPod *v1.Pod
var err error
if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
return nil, err
@@ -116,10 +117,10 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeN
return updatedPod, nil
}
func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) error {
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
var zero int64
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
if err == nil {
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
}
@@ -138,14 +139,14 @@ func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error
// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
// that should not be gracefully terminated.
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
return
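Unwrapping DeletedFinalStateUnknown, as done above, is the standard informer pattern for delete notifications that arrive after a cache resync. A hedged, generic sketch of the same unwrapping (helper name illustrative):

func podFromDeleteEvent(obj interface{}) (*v1.Pod, bool) {
	if pod, ok := obj.(*v1.Pod); ok {
		return pod, true
	}
	// A resync can replace the object with a tombstone; recover the pod from it.
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false
	}
	pod, ok := tombstone.Obj.(*v1.Pod)
	return pod, ok
}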
@@ -176,7 +177,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
// TODO(mikedanese): this can be removed when we no longer
// guarantee backwards compatibility of master API to kubelets with
// versions less than 1.1.0
node := nodeObj.(*api.Node)
node := nodeObj.(*v1.Node)
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
@@ -191,7 +192,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
// update ready status of all pods running on given node from master
// return true if success
func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
// Don't set pods to NotReady if the kubelet is running a version that
// doesn't understand how to correct readiness.
// TODO: Remove this check when we no longer guarantee backward compatibility
@@ -201,8 +202,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
}
nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName)}
pods, err := kubeClient.Core().Pods(api.NamespaceAll).List(opts)
opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil {
return err
}
@@ -215,8 +216,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
}
for i, cond := range pod.Status.Conditions {
if cond.Type == api.PodReady {
pod.Status.Conditions[i].Status = api.ConditionFalse
if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = v1.ConditionFalse
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil {
@@ -237,7 +238,7 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
// in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
// Older versions were inflexible and modifying pod.Status directly through
// the apiserver would result in unexpected outcomes.
func nodeRunningOutdatedKubelet(node *api.Node) bool {
func nodeRunningOutdatedKubelet(node *v1.Node) bool {
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
@@ -265,7 +266,7 @@ func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.Nod
}
func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
ref := &api.ObjectReference{
ref := &v1.ObjectReference{
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeUID),
@@ -275,8 +276,8 @@ func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
func recordNodeStatusChange(recorder record.EventRecorder, node *api.Node, new_status string) {
ref := &api.ObjectReference{
func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
ref := &v1.ObjectReference{
Kind: "Node",
Name: node.Name,
UID: node.UID,
@@ -285,5 +286,5 @@ func recordNodeStatusChange(recorder record.EventRecorder, node *api.Node, new_s
glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}


@@ -26,9 +26,10 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -83,7 +84,7 @@ const (
type nodeStatusData struct {
probeTimestamp unversioned.Time
readyTransitionTimestamp unversioned.Time
status api.NodeStatus
status v1.NodeStatus
}
type NodeController struct {
@@ -91,7 +92,7 @@ type NodeController struct {
cloud cloudprovider.Interface
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
knownNodeSet map[string]*api.Node
knownNodeSet map[string]*v1.Node
kubeClient clientset.Interface
// Method for easy mocking in unittest.
lookupIP func(host string) ([]net.IP, error)
@@ -140,9 +141,9 @@ type NodeController struct {
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator
forcefullyDeletePod func(*api.Pod) error
forcefullyDeletePod func(*v1.Pod) error
nodeExistsInCloudProvider func(types.NodeName) (bool, error)
computeZoneStateFunc func(nodeConditions []*api.NodeCondition) (int, zoneState)
computeZoneStateFunc func(nodeConditions []*v1.NodeCondition) (int, zoneState)
enterPartialDisruptionFunc func(nodeNum int) float32
enterFullDisruptionFunc func(nodeNum int) float32
@@ -183,11 +184,11 @@ func NewNodeController(
nodeCIDRMaskSize int,
allocateNodeCIDRs bool) (*NodeController, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
@@ -208,7 +209,7 @@ func NewNodeController(
nc := &NodeController{
cloud: cloud,
knownNodeSet: make(map[string]*api.Node),
knownNodeSet: make(map[string]*v1.Node),
kubeClient: kubeClient,
recorder: recorder,
podEvictionTimeout: podEvictionTimeout,
@@ -223,7 +224,7 @@ func NewNodeController(
clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
forcefullyDeletePod: func(p *v1.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
evictionLimiterQPS: evictionLimiterQPS,
secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
@@ -246,14 +247,14 @@ func NewNodeController(
nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs {
var nodeList *api.NodeList
var nodeList *v1.NodeList
var err error
// We must poll because apiserver might not be up. This error causes
// controller manager to restart.
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
nodeList, err = kubeClient.Core().Nodes().List(api.ListOptions{
FieldSelector: fields.Everything(),
LabelSelector: labels.Everything(),
nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{
FieldSelector: fields.Everything().String(),
LabelSelector: labels.Everything().String(),
})
if err != nil {
glog.Errorf("Failed to list all nodes: %v", err)
@@ -275,14 +276,14 @@ func NewNodeController(
utilruntime.HandleError(err)
return
}
node := obj.(*api.Node)
node := obj.(*v1.Node)
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
},
UpdateFunc: func(_, obj interface{}) {
node := obj.(*api.Node)
node := obj.(*v1.Node)
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used),
@@ -309,7 +310,7 @@ func NewNodeController(
return
}
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*api.Node)); err != nil {
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*v1.Node)); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
}
@@ -321,15 +322,15 @@ func NewNodeController(
return
}
node, isNode := obj.(*api.Node)
// We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly. #34692
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here and we need to handle that correctly. #34692
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*api.Node)
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
@@ -381,7 +382,7 @@ func (nc *NodeController) Run() {
} else if !exists {
glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
} else {
node, _ := obj.(*api.Node)
node, _ := obj.(*v1.Node)
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
@@ -410,14 +411,14 @@ func (nc *NodeController) monitorNodeStatus() error {
// It is enough to list Nodes from apiserver, since we can tolerate some small
// delays compared to the state in etcd, and there is eventual consistency anyway.
// TODO: We should list them from local cache: nodeStore.
nodes, err := nc.kubeClient.Core().Nodes().List(api.ListOptions{ResourceVersion: "0"})
nodes, err := nc.kubeClient.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
if err != nil {
return err
}
added, deleted := nc.checkForNodeAddedDeleted(nodes)
for i := range added {
glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name)
recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name))
recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name))
nc.knownNodeSet[added[i].Name] = added[i]
// When adding new Nodes we need to check if new zone appeared, and if so add new evictor.
zone := utilnode.GetZoneKey(added[i])
@@ -434,15 +435,15 @@ func (nc *NodeController) monitorNodeStatus() error {
for i := range deleted {
glog.V(1).Infof("NodeController observed a Node deletion: %v", deleted[i].Name)
recordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", deleted[i].Name))
recordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", deleted[i].Name))
delete(nc.knownNodeSet, deleted[i].Name)
}
zoneToNodeConditions := map[string][]*api.NodeCondition{}
zoneToNodeConditions := map[string][]*v1.NodeCondition{}
for i := range nodes.Items {
var gracePeriod time.Duration
var observedReadyCondition api.NodeCondition
var currentReadyCondition *api.NodeCondition
var observedReadyCondition v1.NodeCondition
var currentReadyCondition *v1.NodeCondition
node := &nodes.Items[i]
for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node)
@@ -463,33 +464,33 @@ func (nc *NodeController) monitorNodeStatus() error {
continue
}
// We do not treat a master node as a part of the cluster for network disruption checking.
if !system.IsMasterNode(node) {
if !system.IsMasterNode(node.Name) {
zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition)
}
decisionTimestamp := nc.now()
if currentReadyCondition != nil {
// Check eviction timeout against decisionTimestamp
if observedReadyCondition.Status == api.ConditionFalse &&
if observedReadyCondition.Status == v1.ConditionFalse &&
decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) {
glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
}
}
if observedReadyCondition.Status == api.ConditionUnknown &&
if observedReadyCondition.Status == v1.ConditionUnknown &&
decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) {
glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
}
}
if observedReadyCondition.Status == api.ConditionTrue {
if observedReadyCondition.Status == v1.ConditionTrue {
if nc.cancelPodEviction(node) {
glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
}
}
// Report node event.
if currentReadyCondition.Status != api.ConditionTrue && observedReadyCondition.Status == api.ConditionTrue {
if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue {
recordNodeStatusChange(nc.recorder, node, "NodeNotReady")
if err = markAllPodsNotReady(nc.kubeClient, node); err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err))
@@ -498,7 +499,7 @@ func (nc *NodeController) monitorNodeStatus() error {
// Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately.
if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil {
if currentReadyCondition.Status != v1.ConditionTrue && nc.cloud != nil {
exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
@@ -506,7 +507,7 @@ func (nc *NodeController) monitorNodeStatus() error {
}
if !exists {
glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
recordNodeEvent(nc.recorder, node.Name, string(node.UID), api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
recordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
go func(nodeName string) {
defer utilruntime.HandleCrash()
// Kubelet is not reporting and Cloud Provider says node
@@ -526,7 +527,7 @@ func (nc *NodeController) monitorNodeStatus() error {
return nil
}
func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*api.NodeCondition, nodes *api.NodeList) {
func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes *v1.NodeList) {
newZoneStates := map[string]zoneState{}
allAreFullyDisrupted := true
for k, v := range zoneToNodeConditions {
@@ -627,18 +628,18 @@ func (nc *NodeController) setLimiterInZone(zone string, zoneSize int, state zone
// For a given node checks its conditions and tries to update it. Returns grace period to which given node
// is entitled, state of current and last observed Ready Condition, and an error if it occurred.
func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, api.NodeCondition, *api.NodeCondition, error) {
func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {
var err error
var gracePeriod time.Duration
var observedReadyCondition api.NodeCondition
_, currentReadyCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
var observedReadyCondition v1.NodeCondition
_, currentReadyCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if currentReadyCondition == nil {
// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
// A fake ready condition is created, where LastProbeTime and LastTransitionTime are set
// to node.CreationTimestamp to avoid handling the corner case.
observedReadyCondition = api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionUnknown,
observedReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: node.CreationTimestamp,
LastTransitionTime: node.CreationTimestamp,
}
@@ -669,11 +670,11 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// - if 'LastProbeTime' has gone back in time it's probably an error; currently we ignore it,
// - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check
// if that's the case, but it does not seem necessary.
var savedCondition *api.NodeCondition
var savedCondition *v1.NodeCondition
if found {
_, savedCondition = api.GetNodeCondition(&savedNodeStatus.status, api.NodeReady)
_, savedCondition = v1.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady)
}
_, observedCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
_, observedCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if !found {
glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
savedNodeStatus = nodeStatusData{
@@ -725,9 +726,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// (regardless of its current value) in the master.
if currentReadyCondition == nil {
glog.V(2).Infof("node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionUnknown,
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: fmt.Sprintf("Kubelet never posted node status."),
LastHeartbeatTime: node.CreationTimestamp,
@@ -736,8 +737,8 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
} else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), observedReadyCondition)
if observedReadyCondition.Status != api.ConditionUnknown {
currentReadyCondition.Status = api.ConditionUnknown
if observedReadyCondition.Status != v1.ConditionUnknown {
currentReadyCondition.Status = v1.ConditionUnknown
currentReadyCondition.Reason = "NodeStatusUnknown"
currentReadyCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.")
// LastProbeTime is the last time we heard from kubelet.
@@ -749,12 +750,12 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// Like NodeReady condition, NodeOutOfDisk was last set longer ago than gracePeriod, so update
// it to Unknown (regardless of its current value) in the master.
// TODO(madhusudancs): Refactor this with readyCondition to remove duplicated code.
_, oodCondition := api.GetNodeCondition(&node.Status, api.NodeOutOfDisk)
_, oodCondition := v1.GetNodeCondition(&node.Status, v1.NodeOutOfDisk)
if oodCondition == nil {
glog.V(2).Infof("Out of disk condition of node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionUnknown,
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: fmt.Sprintf("Kubelet never posted node status."),
LastHeartbeatTime: node.CreationTimestamp,
@@ -763,16 +764,16 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
} else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last out of disk condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), oodCondition)
if oodCondition.Status != api.ConditionUnknown {
oodCondition.Status = api.ConditionUnknown
if oodCondition.Status != v1.ConditionUnknown {
oodCondition.Status = v1.ConditionUnknown
oodCondition.Reason = "NodeStatusUnknown"
oodCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.")
oodCondition.LastTransitionTime = nc.now()
}
}
_, currentCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
if !api.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
_, currentCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if !v1.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
glog.Errorf("Error updating node %s: %v", node.Name, err)
return gracePeriod, observedReadyCondition, currentReadyCondition, err
@@ -790,7 +791,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
return gracePeriod, observedReadyCondition, currentReadyCondition, err
}
func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added, deleted []*api.Node) {
func (nc *NodeController) checkForNodeAddedDeleted(nodes *v1.NodeList) (added, deleted []*v1.Node) {
for i := range nodes.Items {
if _, has := nc.knownNodeSet[nodes.Items[i].Name]; !has {
added = append(added, &nodes.Items[i])
@@ -799,7 +800,7 @@ func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added,
// If there's a difference between lengths of known Nodes and observed nodes
// we must have removed some Node.
if len(nc.knownNodeSet)+len(added) != len(nodes.Items) {
knowSetCopy := map[string]*api.Node{}
knowSetCopy := map[string]*v1.Node{}
for k, v := range nc.knownNodeSet {
knowSetCopy[k] = v
}
@@ -815,7 +816,7 @@ func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added,
// cancelPodEviction removes any queued evictions, typically because the node is available again. It
// returns true if an eviction was queued.
func (nc *NodeController) cancelPodEviction(node *api.Node) bool {
func (nc *NodeController) cancelPodEviction(node *v1.Node) bool {
zone := utilnode.GetZoneKey(node)
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
@@ -829,7 +830,7 @@ func (nc *NodeController) cancelPodEviction(node *api.Node) bool {
// evictPods queues an eviction for the provided node name, and returns false if the node is already
// queued for eviction.
func (nc *NodeController) evictPods(node *api.Node) bool {
func (nc *NodeController) evictPods(node *v1.Node) bool {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
@@ -853,11 +854,11 @@ func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32 {
// - fullyDisrupted if there are no Ready Nodes,
// - partiallyDisrupted if at least nc.unhealthyZoneThreshold percent of Nodes are not Ready,
// - normal otherwise
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*api.NodeCondition) (int, zoneState) {
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, zoneState) {
readyNodes := 0
notReadyNodes := 0
for i := range nodeReadyConditions {
if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == api.ConditionTrue {
if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == v1.ConditionTrue {
readyNodes++
} else {
notReadyNodes++
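ComputeZoneState reduces to counting Ready conditions and comparing the not-ready fraction against nc.unhealthyZoneThreshold. A self-contained sketch of that arithmetic, with stand-in names for the controller's zoneState values:

// Hypothetical stand-ins for the controller's zoneState constants.
type zoneHealth string

const (
	zoneNormal             zoneHealth = "Normal"
	zonePartiallyDisrupted zoneHealth = "PartialDisruption"
	zoneFullyDisrupted     zoneHealth = "FullDisruption"
)

// classifyZone applies the documented contract: fully disrupted when no node is
// Ready, partially disrupted when the not-ready fraction reaches the threshold.
func classifyZone(readyNodes, notReadyNodes int, unhealthyZoneThreshold float32) (int, zoneHealth) {
	switch {
	case readyNodes == 0 && notReadyNodes > 0:
		return notReadyNodes, zoneFullyDisrupted
	case float32(notReadyNodes)/float32(notReadyNodes+readyNodes) >= unhealthyZoneThreshold:
		return notReadyNodes, zonePartiallyDisrupted
	default:
		return notReadyNodes, zoneNormal
	}
}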

File diff suppressed because it is too large


@@ -26,8 +26,9 @@ import (
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock"
utilnode "k8s.io/kubernetes/pkg/util/node"
@@ -43,14 +44,14 @@ type FakeNodeHandler struct {
*fake.Clientset
// Input: Hooks determine if request is valid or not
CreateHook func(*FakeNodeHandler, *api.Node) bool
Existing []*api.Node
CreateHook func(*FakeNodeHandler, *v1.Node) bool
Existing []*v1.Node
// Output
CreatedNodes []*api.Node
DeletedNodes []*api.Node
UpdatedNodes []*api.Node
UpdatedNodeStatuses []*api.Node
CreatedNodes []*v1.Node
DeletedNodes []*v1.Node
UpdatedNodes []*v1.Node
UpdatedNodeStatuses []*v1.Node
RequestCount int
// Synchronization
@@ -59,29 +60,29 @@ type FakeNodeHandler struct {
}
type FakeLegacyHandler struct {
unversionedcore.CoreInterface
v1core.CoreV1Interface
n *FakeNodeHandler
}
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*api.Node {
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
c.lock.Lock()
defer c.lock.Unlock()
updatedNodesCopy := make([]*api.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
for i, ptr := range c.UpdatedNodes {
updatedNodesCopy[i] = ptr
}
return updatedNodesCopy
}
func (c *FakeNodeHandler) Core() unversionedcore.CoreInterface {
func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c}
}
func (m *FakeLegacyHandler) Nodes() unversionedcore.NodeInterface {
func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
return m.n
}
func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -101,7 +102,7 @@ func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
}
}
func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -122,13 +123,13 @@ func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
return nil, nil
}
func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
var nodes []*api.Node
var nodes []*v1.Node
for i := 0; i < len(m.UpdatedNodes); i++ {
if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
nodes = append(nodes, m.UpdatedNodes[i])
@@ -144,14 +145,14 @@ func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
nodes = append(nodes, m.CreatedNodes[i])
}
}
nodeList := &api.NodeList{}
nodeList := &v1.NodeList{}
for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node)
}
return nodeList, nil
}
func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -164,11 +165,11 @@ func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
return nil
}
func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error {
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
return nil
}
func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -185,7 +186,7 @@ func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
return node, nil
}
func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -196,23 +197,23 @@ func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
return node, nil
}
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*api.Node, error) {
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
m.RequestCount++
return &api.Node{}, nil
return &v1.Node{}, nil
}
func (m *FakeNodeHandler) Watch(opts api.ListOptions) (watch.Interface, error) {
func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Node, error) {
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil
}
// FakeRecorder is used as a fake during testing.
type FakeRecorder struct {
source api.EventSource
events []*api.Event
source v1.EventSource
events []*v1.Event
clock clock.Clock
}
@@ -228,7 +229,7 @@ func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp unversioned.Time
}
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
ref, err := api.GetReference(obj)
ref, err := v1.GetReference(obj)
if err != nil {
return
}
@@ -240,15 +241,15 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.T
}
}
func (f *FakeRecorder) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event {
func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
fmt.Println("make event")
t := unversioned.Time{Time: f.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
namespace = api.NamespaceDefault
namespace = v1.NamespaceDefault
}
return &api.Event{
ObjectMeta: api.ObjectMeta{
return &v1.Event{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace,
},
@@ -264,41 +265,41 @@ func (f *FakeRecorder) makeEvent(ref *api.ObjectReference, eventtype, reason, me
func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: api.EventSource{Component: "nodeControllerTest"},
events: []*api.Event{},
source: v1.EventSource{Component: "nodeControllerTest"},
events: []*v1.Event{},
clock: clock.NewFakeClock(time.Now()),
}
}
func newNode(name string) *api.Node {
return &api.Node{
ObjectMeta: api.ObjectMeta{Name: name},
Spec: api.NodeSpec{
func newNode(name string) *v1.Node {
return &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{
ExternalID: name,
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
}
}
func newPod(name, host string) *api.Pod {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
func newPod(name, host string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: host,
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Status: api.ConditionTrue,
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
@@ -307,7 +308,7 @@ func newPod(name, host string) *api.Pod {
return pod
}
func contains(node *api.Node, nodes []*api.Node) bool {
func contains(node *v1.Node, nodes []*v1.Node) bool {
for i := 0; i < len(nodes); i++ {
if node.Name == nodes[i].Name {
return true
@@ -318,7 +319,7 @@ func contains(node *api.Node, nodes []*api.Node) bool {
// Returns list of zones for all Nodes stored in FakeNodeHandler
func getZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(api.ListOptions{})
nodes, _ := nodeHandler.List(v1.ListOptions{})
zones := sets.NewString()
for _, node := range nodes.Items {
zones.Insert(utilnode.GetZoneKey(&node))