cmd/kube-controller-manager
@@ -26,14 +26,14 @@ import (
 	"time"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/record"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/informers"
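The import swap above is this commit in miniature: the controller drops the internal API package and internal clientset in favour of the public v1 / extensions-v1beta1 types and the generated release_1_5 clientset. A minimal sketch of constructing such a versioned client follows; the kubeconfig path is a placeholder and the package paths assume this tree's layout at the time of the change.

package main

import (
	"github.com/golang/glog"

	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
	// Build a *restclient.Config; an empty master URL plus a kubeconfig path
	// is the usual out-of-cluster setup (the path below is a placeholder).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		glog.Fatalf("error building client config: %v", err)
	}

	// NewForConfig is the standard constructor on generated clientsets; the
	// result satisfies the clientset.Interface the controller expects.
	kubeClient, err := clientset.NewForConfig(config)
	if err != nil {
		glog.Fatalf("error building versioned clientset: %v", err)
	}
	_ = kubeClient // pass this to NewReplicaSetController
}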
@@ -107,13 +107,13 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme
 	}
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

 	rsc := &ReplicaSetController{
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
-			Recorder:   eventBroadcaster.NewRecorder(api.EventSource{Component: "replicaset-controller"}),
+			Recorder:   eventBroadcaster.NewRecorder(v1.EventSource{Component: "replicaset-controller"}),
 		},
 		burstReplicas: burstReplicas,
 		expectations:  controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
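With the versioned clientset in place, events flow through the v1 core client's EventSinkImpl and the recorder is keyed by a v1.EventSource. The sketch below reuses the imports from the hunk above; the second function and its reason/message strings are purely illustrative.

// Wiring as done in the constructor above, factored out for clarity.
func newReplicaSetRecorder(kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return eventBroadcaster.NewRecorder(v1.EventSource{Component: "replicaset-controller"})
}

// Illustrative only: emit a normal event against a ReplicaSet.
func recordPodCreated(recorder record.EventRecorder, rs *extensions.ReplicaSet, pod *v1.Pod) {
	recorder.Eventf(rs, v1.EventTypeNormal, "SuccessfulCreate", "Created pod %q", pod.Name)
}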
@@ -176,7 +176,7 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
 // getPodReplicaSet returns the replica set managing the given pod.
 // TODO: Surface that we are ignoring multiple replica sets for a single pod.
 // TODO: use ownerReference.Controller to determine if the rs controls the pod.
-func (rsc *ReplicaSetController) getPodReplicaSet(pod *api.Pod) *extensions.ReplicaSet {
+func (rsc *ReplicaSetController) getPodReplicaSet(pod *v1.Pod) *extensions.ReplicaSet {
 	// look up in the cache, if cached and the cache is valid, just return cached value
 	if obj, cached := rsc.lookupCache.GetMatchingObject(pod); cached {
 		rs, ok := obj.(*extensions.ReplicaSet)
@@ -254,7 +254,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
 }

 // isCacheValid check if the cache is valid
-func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions.ReplicaSet) bool {
+func (rsc *ReplicaSetController) isCacheValid(pod *v1.Pod, cachedRS *extensions.ReplicaSet) bool {
 	_, err := rsc.rsLister.ReplicaSets(cachedRS.Namespace).Get(cachedRS.Name)
 	// rs has been deleted or updated, cache is invalid
 	if err != nil || !isReplicaSetMatch(pod, cachedRS) {
@@ -265,7 +265,7 @@ func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions

 // isReplicaSetMatch take a Pod and ReplicaSet, return whether the Pod and ReplicaSet are matching
 // TODO(mqliang): This logic is a copy from GetPodReplicaSets(), remove the duplication
-func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {
+func isReplicaSetMatch(pod *v1.Pod, rs *extensions.ReplicaSet) bool {
 	if rs.Namespace != pod.Namespace {
 		return false
 	}
@@ -284,7 +284,7 @@ func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {

 // When a pod is created, enqueue the replica set that manages it and update it's expectations.
 func (rsc *ReplicaSetController) addPod(obj interface{}) {
-	pod := obj.(*api.Pod)
+	pod := obj.(*v1.Pod)
 	glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)

 	rs := rsc.getPodReplicaSet(pod)
@@ -308,10 +308,10 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {

 // When a pod is updated, figure out what replica set/s manage it and wake them
 // up. If the labels of the pod have changed we need to awaken both the old
-// and new replica set. old and cur must be *api.Pod types.
+// and new replica set. old and cur must be *v1.Pod types.
 func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
-	curPod := cur.(*api.Pod)
-	oldPod := old.(*api.Pod)
+	curPod := cur.(*v1.Pod)
+	oldPod := old.(*v1.Pod)
 	if curPod.ResourceVersion == oldPod.ResourceVersion {
 		// Periodic resync will send update events for all known pods.
 		// Two different versions of the same pod will always have different RVs.
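The ResourceVersion guard above exists because periodic resyncs replay every known pod as an update; two different versions of the same object always carry different RVs, so equal RVs mean nothing changed. The rest of the body (not shown in this hunk) compares labels to decide whether the old replica set needs waking as well. A rough sketch of that decision, using the file's existing reflect import; the helper name is made up for illustration.

// labelsChangedOnRealUpdate reports whether this update is a real change
// (not a resync echo) that also altered the pod's labels, in which case
// both the old and the new owning ReplicaSet need to be woken up.
func labelsChangedOnRealUpdate(old, cur interface{}) bool {
	curPod := cur.(*v1.Pod)
	oldPod := old.(*v1.Pod)
	if curPod.ResourceVersion == oldPod.ResourceVersion {
		return false // periodic resync echo, nothing changed
	}
	return !reflect.DeepEqual(curPod.Labels, oldPod.Labels)
}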
@@ -348,9 +348,9 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
 }

 // When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
-// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
+// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
 func (rsc *ReplicaSetController) deletePod(obj interface{}) {
-	pod, ok := obj.(*api.Pod)
+	pod, ok := obj.(*v1.Pod)

 	// When a delete is dropped, the relist will notice a pod in the store not
 	// in the list, leading to the insertion of a tombstone object which contains
@@ -362,7 +362,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
 			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj))
 			return
 		}
-		pod, ok = tombstone.Obj.(*api.Pod)
+		pod, ok = tombstone.Obj.(*v1.Pod)
 		if !ok {
 			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %#v", obj))
 			return
@@ -426,8 +426,8 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
 // manageReplicas checks and updates replicas for the given ReplicaSet.
 // Does NOT modify <filteredPods>.
 // It will requeue the replica set in case of an error while creating/deleting pods.
-func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) error {
-	diff := len(filteredPods) - int(rs.Spec.Replicas)
+func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *extensions.ReplicaSet) error {
+	diff := len(filteredPods) - int(*(rs.Spec.Replicas))
 	rsKey, err := controller.KeyFunc(rs)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err))
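The added dereference is not cosmetic: in the versioned extensions/v1beta1 API, ReplicaSetSpec.Replicas is a *int32 so that an unset value can be told apart from an explicit 0, whereas the internal type uses a plain int32. Defaulting normally fills the pointer in (v1beta1 defaults it to 1), but code handling arbitrary objects may still want a nil guard; a small illustrative helper, not part of this commit:

// desiredReplicas returns the spec'd replica count, treating a nil pointer
// as the v1beta1 default of 1. Helper name and placement are illustrative.
func desiredReplicas(rs *extensions.ReplicaSet) int32 {
	if rs.Spec.Replicas == nil {
		return 1
	}
	return *rs.Spec.Replicas
}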
@@ -448,7 +448,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
 		rsc.expectations.ExpectCreations(rsKey, diff)
 		var wg sync.WaitGroup
 		wg.Add(diff)
-		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
+		glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
 		for i := 0; i < diff; i++ {
 			go func() {
 				defer wg.Done()
@@ -456,7 +456,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext

 				if rsc.garbageCollectorEnabled {
 					var trueVar = true
-					controllerRef := &api.OwnerReference{
+					controllerRef := &v1.OwnerReference{
 						APIVersion: getRSKind().GroupVersion().String(),
 						Kind:       getRSKind().Kind,
 						Name:       rs.Name,
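With the garbage collector enabled, the controller now stamps the pods it creates with a versioned v1.OwnerReference whose Controller field is set, letting the GC distinguish the managing controller from other owners. A sketch of building that reference for a ReplicaSet; the helper name is illustrative, and the fields mirror the code above plus the UID and Controller fields that follow it in the full file.

// newControllerRef builds the owner reference attached to pods created on
// behalf of rs; Controller marks rs as the managing controller.
func newControllerRef(rs *extensions.ReplicaSet) *v1.OwnerReference {
	trueVar := true
	return &v1.OwnerReference{
		APIVersion: getRSKind().GroupVersion().String(),
		Kind:       getRSKind().Kind,
		Name:       rs.Name,
		UID:        rs.UID,
		Controller: &trueVar,
	}
}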
@@ -481,9 +481,9 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
 			diff = rsc.burstReplicas
 		}
 		errCh = make(chan error, diff)
-		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
+		glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
 		// No need to sort pods if we are about to delete all of them
-		if rs.Spec.Replicas != 0 {
+		if *(rs.Spec.Replicas) != 0 {
 			// Sort the pods in the order such that not-ready < ready, unscheduled
 			// < scheduled, and pending < running. This ensures that we delete pods
 			// in the earlier stages whenever possible.
@@ -567,7 +567,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	// NOTE: filteredPods are pointing to objects from cache - if you need to
 	// modify them, you need to copy it first.
 	// TODO: Do the List and Filter in a single pass, or use an index.
-	var filteredPods []*api.Pod
+	var filteredPods []*v1.Pod
 	if rsc.garbageCollectorEnabled {
 		// list all pods to include the pods that don't match the rs`s selector
 		// anymore but has the stale controller ref.
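filteredPods is built from objects owned by the shared informer cache, which is why the NOTE above warns against modifying them in place. When the garbage collector is disabled, the filter is essentially the ReplicaSet's label selector applied to versioned pods; the sketch below shows that idea using the unversioned.LabelSelectorAsSelector helper already imported by this file (labels here is k8s.io/kubernetes/pkg/labels). It is not the controller's exact filtering code.

// filterPodsBySelector keeps the pods matching rs's selector, skipping pods
// already marked for deletion; the input slice is treated as read-only.
func filterPodsBySelector(rs *extensions.ReplicaSet, pods []*v1.Pod) ([]*v1.Pod, error) {
	selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
	if err != nil {
		return nil, fmt.Errorf("invalid selector on ReplicaSet %s/%s: %v", rs.Namespace, rs.Name, err)
	}
	var filtered []*v1.Pod
	for _, pod := range pods {
		if pod.DeletionTimestamp == nil && selector.Matches(labels.Set(pod.Labels)) {
			filtered = append(filtered, pod)
		}
	}
	return filtered, nil
}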