Update the DaemonSet controller to use the apps/v1 API

parent fd1527a977
commit f52e7ef4bf
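
In short: the DaemonSet controller's informers, listers, typed clients, and the DaemonSet/ControllerRevision types themselves move from extensions/v1beta1 and apps/v1beta1 to apps/v1, and the template-generation value is now obtained via util.GetTemplateGeneration(ds) instead of reading ds.Spec.TemplateGeneration directly. The sketch below is assembled from calls that appear in this diff; treat it as a rough before/after of the controller wiring, not an excerpt of any single file.

	// Before: wired against extensions/v1beta1 DaemonSets and apps/v1beta1 ControllerRevisions.
	// dsc, err := daemon.NewDaemonSetsController(
	// 	ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
	// 	ctx.InformerFactory.Apps().V1beta1().ControllerRevisions(),
	// 	ctx.InformerFactory.Core().V1().Pods(),
	// 	ctx.InformerFactory.Core().V1().Nodes(),
	// 	ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
	// )

	// After: everything comes from the apps/v1 group; typed client calls follow suit,
	// e.g. kubeClient.AppsV1().DaemonSets(ns) instead of kubeClient.ExtensionsV1beta1().DaemonSets(ns).
	dsc, err := daemon.NewDaemonSetsController(
		ctx.InformerFactory.Apps().V1().DaemonSets(),
		ctx.InformerFactory.Apps().V1().ControllerRevisions(),
		ctx.InformerFactory.Core().V1().Pods(),
		ctx.InformerFactory.Core().V1().Nodes(),
		ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
	)
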
@@ -21,10 +21,31 @@ limitations under the License.
package app

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/controller/daemon"
	"k8s.io/kubernetes/pkg/controller/statefulset"
)

func startDaemonSetController(ctx ControllerContext) (bool, error) {
	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}] {
		return false, nil
	}
	dsc, err := daemon.NewDaemonSetsController(
		ctx.InformerFactory.Apps().V1().DaemonSets(),
		ctx.InformerFactory.Apps().V1().ControllerRevisions(),
		ctx.InformerFactory.Core().V1().Pods(),
		ctx.InformerFactory.Core().V1().Nodes(),
		ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
	)
	if err != nil {
		return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
	}
	go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
	return true, nil
}

func startStatefulSetController(ctx ControllerContext) (bool, error) {
	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1beta1", Resource: "statefulsets"}] {
		return false, nil
@@ -24,29 +24,10 @@ import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/kubernetes/pkg/controller/daemon"
	"k8s.io/kubernetes/pkg/controller/deployment"
	"k8s.io/kubernetes/pkg/controller/replicaset"
)

func startDaemonSetController(ctx ControllerContext) (bool, error) {
	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "daemonsets"}] {
		return false, nil
	}
	dsc, err := daemon.NewDaemonSetsController(
		ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
		ctx.InformerFactory.Apps().V1beta1().ControllerRevisions(),
		ctx.InformerFactory.Core().V1().Pods(),
		ctx.InformerFactory.Core().V1().Nodes(),
		ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
	)
	if err != nil {
		return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
	}
	go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
	return true, nil
}

func startDeploymentController(ctx ControllerContext) (bool, error) {
	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "extensions", Version: "v1beta1", Resource: "deployments"}] {
		return false, nil
@@ -21,7 +21,7 @@ import (
	"sync"

	"github.com/golang/glog"
	appsv1beta1 "k8s.io/api/apps/v1beta1"
	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
@@ -436,18 +436,18 @@ func NewControllerRevisionControllerRefManager(
// If the error is nil, either the reconciliation succeeded, or no
// reconciliation was necessary. The list of ControllerRevisions that you now own is
// returned.
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*appsv1beta1.ControllerRevision) ([]*appsv1beta1.ControllerRevision, error) {
	var claimed []*appsv1beta1.ControllerRevision
func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histories []*apps.ControllerRevision) ([]*apps.ControllerRevision, error) {
	var claimed []*apps.ControllerRevision
	var errlist []error

	match := func(obj metav1.Object) bool {
		return m.Selector.Matches(labels.Set(obj.GetLabels()))
	}
	adopt := func(obj metav1.Object) error {
		return m.AdoptControllerRevision(obj.(*appsv1beta1.ControllerRevision))
		return m.AdoptControllerRevision(obj.(*apps.ControllerRevision))
	}
	release := func(obj metav1.Object) error {
		return m.ReleaseControllerRevision(obj.(*appsv1beta1.ControllerRevision))
		return m.ReleaseControllerRevision(obj.(*apps.ControllerRevision))
	}

	for _, h := range histories {
@@ -465,7 +465,7 @@ func (m *ControllerRevisionControllerRefManager) ClaimControllerRevisions(histor

// AdoptControllerRevision sends a patch to take control of the ControllerRevision. It returns the error if
// the patching fails.
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history *apps.ControllerRevision) error {
	if err := m.CanAdopt(); err != nil {
		return fmt.Errorf("can't adopt ControllerRevision %v/%v (%v): %v", history.Namespace, history.Name, history.UID, err)
	}
@@ -480,7 +480,7 @@ func (m *ControllerRevisionControllerRefManager) AdoptControllerRevision(history

// ReleaseControllerRevision sends a patch to free the ControllerRevision from the control of its controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *appsv1beta1.ControllerRevision) error {
func (m *ControllerRevisionControllerRefManager) ReleaseControllerRevision(history *apps.ControllerRevision) error {
	glog.V(2).Infof("patching ControllerRevision %s_%s to remove its controllerRef to %s/%s:%s",
		history.Namespace, history.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
	deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), history.UID)
@@ -23,9 +23,8 @@ import (
	"sync"
	"time"

	apps "k8s.io/api/apps/v1beta1"
	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
@@ -34,16 +33,14 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	appsinformers "k8s.io/client-go/informers/apps/v1beta1"
	appsinformers "k8s.io/client-go/informers/apps/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	unversionedapps "k8s.io/client-go/kubernetes/typed/apps/v1"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	unversionedextensions "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	appslisters "k8s.io/client-go/listers/apps/v1beta1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	extensionslisters "k8s.io/client-go/listers/extensions/v1beta1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/integer"
@@ -82,7 +79,7 @@ const (
)

// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = extensions.SchemeGroupVersion.WithKind("DaemonSet")
var controllerKind = apps.SchemeGroupVersion.WithKind("DaemonSet")

// DaemonSetsController is responsible for synchronizing DaemonSet objects stored
// in the system with actual running pods.
@@ -99,12 +96,12 @@ type DaemonSetsController struct {
	// To allow injection of syncDaemonSet for testing.
	syncHandler func(dsKey string) error
	// used for unit testing
	enqueueDaemonSet func(ds *extensions.DaemonSet)
	enqueueDaemonSetRateLimited func(ds *extensions.DaemonSet)
	enqueueDaemonSet func(ds *apps.DaemonSet)
	enqueueDaemonSetRateLimited func(ds *apps.DaemonSet)
	// A TTLCache of pod creates/deletes each ds expects to see
	expectations controller.ControllerExpectationsInterface
	// dsLister can list/get daemonsets from the shared informer's store
	dsLister extensionslisters.DaemonSetLister
	dsLister appslisters.DaemonSetLister
	// dsStoreSynced returns true if the daemonset store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	dsStoreSynced cache.InformerSynced
@@ -134,7 +131,7 @@ type DaemonSetsController struct {
}

// NewDaemonSetsController creates a new DaemonSetsController
func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
func NewDaemonSetsController(daemonSetInformer appsinformers.DaemonSetInformer, historyInformer appsinformers.ControllerRevisionInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface) (*DaemonSetsController, error) {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -163,13 +160,13 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo

	daemonSetInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			ds := obj.(*extensions.DaemonSet)
			ds := obj.(*apps.DaemonSet)
			glog.V(4).Infof("Adding daemon set %s", ds.Name)
			dsc.enqueueDaemonSet(ds)
		},
		UpdateFunc: func(old, cur interface{}) {
			oldDS := old.(*extensions.DaemonSet)
			curDS := cur.(*extensions.DaemonSet)
			oldDS := old.(*apps.DaemonSet)
			curDS := cur.(*apps.DaemonSet)
			glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
			dsc.enqueueDaemonSet(curDS)
		},
@@ -211,14 +208,14 @@ func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInfo
}

func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
	ds, ok := obj.(*extensions.DaemonSet)
	ds, ok := obj.(*apps.DaemonSet)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		ds, ok = tombstone.Obj.(*extensions.DaemonSet)
		ds, ok = tombstone.Obj.(*apps.DaemonSet)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a DaemonSet %#v", obj))
			return
@@ -272,7 +269,7 @@ func (dsc *DaemonSetsController) processNextWorkItem() bool {
	return true
}

func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) {
func (dsc *DaemonSetsController) enqueue(ds *apps.DaemonSet) {
	key, err := controller.KeyFunc(ds)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err))
@@ -283,7 +280,7 @@ func (dsc *DaemonSetsController) enqueue(ds *extensions.DaemonSet) {
	dsc.queue.Add(key)
}

func (dsc *DaemonSetsController) enqueueRateLimited(ds *extensions.DaemonSet) {
func (dsc *DaemonSetsController) enqueueRateLimited(ds *apps.DaemonSet) {
	key, err := controller.KeyFunc(ds)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", ds, err))
@@ -305,7 +302,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSetAfter(obj interface{}, after ti
}

// getDaemonSetsForPod returns a list of DaemonSets that potentially match the pod.
func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.DaemonSet {
func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*apps.DaemonSet {
	sets, err := dsc.dsLister.GetPodDaemonSets(pod)
	if err != nil {
		return nil
@@ -320,7 +317,7 @@ func (dsc *DaemonSetsController) getDaemonSetsForPod(pod *v1.Pod) []*extensions.

// getDaemonSetsForHistory returns a list of DaemonSets that potentially
// match a ControllerRevision.
func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*extensions.DaemonSet {
func (dsc *DaemonSetsController) getDaemonSetsForHistory(history *apps.ControllerRevision) []*apps.DaemonSet {
	daemonSets, err := dsc.dsLister.GetHistoryDaemonSets(history)
	if err != nil || len(daemonSets) == 0 {
		return nil
@@ -736,7 +733,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned Pods are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.Pod, error) {
func (dsc *DaemonSetsController) getDaemonPods(ds *apps.DaemonSet) ([]*v1.Pod, error) {
	selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
	if err != nil {
		return nil, err
@@ -751,7 +748,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.
	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing Pods (see #42639).
	dsNotDeleted := controller.RecheckDeletionTimestamp(func() (metav1.Object, error) {
		fresh, err := dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
		fresh, err := dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace).Get(ds.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
@@ -770,7 +767,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ds *extensions.DaemonSet) ([]*v1.
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned Pods are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
	claimedPods, err := dsc.getDaemonPods(ds)
	if err != nil {
		return nil, err
@@ -787,7 +784,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *extensions.DaemonSet {
func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.DaemonSet {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	if controllerRef.Kind != controllerKind.Kind {
@@ -809,7 +806,7 @@ func (dsc *DaemonSetsController) resolveControllerRef(namespace string, controll
// After figuring out which nodes should run a Pod of ds but not yet running one and
// which nodes should not run a Pod of ds but currently running one, it calls function
// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) error {
func (dsc *DaemonSetsController) manage(ds *apps.DaemonSet, hash string) error {
	// Find out which nodes are running the daemon pods controlled by ds.
	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
	if err != nil {
@@ -891,7 +888,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet, hash string) e

// syncNodes deletes given pods and creates new daemon set pods on the given nodes
// returns slice with erros if any
func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
func (dsc *DaemonSetsController) syncNodes(ds *apps.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
	// We need to set expectations before creating/deleting pods to avoid race conditions.
	dsKey, err := controller.KeyFunc(ds)
	if err != nil {
@@ -915,7 +912,13 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet

	glog.V(4).Infof("Nodes needing daemon pods for daemon set %s: %+v, creating %d", ds.Name, nodesNeedingDaemonPods, createDiff)
	createWait := sync.WaitGroup{}
	template := util.CreatePodTemplate(ds.Spec.Template, ds.Spec.TemplateGeneration, hash)
	// If the returned error is not nil we have a parse error.
	// The controller handles this via the hash.
	generation, err := util.GetTemplateGeneration(ds)
	if err != nil {
		generation = nil
	}
	template := util.CreatePodTemplate(ds.Spec.Template, generation, hash)
	// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
	// and double with each successful iteration in a kind of "slow start".
	// This handles attempts to start large numbers of pods that would
@@ -989,7 +992,7 @@ func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelet
	return utilerrors.NewAggregate(errors)
}

func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
func storeDaemonSetStatus(dsClient unversionedapps.DaemonSetInterface, ds *apps.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable int) error {
	if int(ds.Status.DesiredNumberScheduled) == desiredNumberScheduled &&
		int(ds.Status.CurrentNumberScheduled) == currentNumberScheduled &&
		int(ds.Status.NumberMisscheduled) == numberMisscheduled &&
@@ -1028,7 +1031,7 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
	return updateErr
}

func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet, hash string) error {
func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *apps.DaemonSet, hash string) error {
	glog.V(4).Infof("Updating daemon set status")
	nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
	if err != nil {
@@ -1063,7 +1066,13 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
						numberAvailable++
					}
				}
				if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
				// If the returned error is not nil we have a parse error.
				// The controller handles this via the hash.
				generation, err := util.GetTemplateGeneration(ds)
				if err != nil {
					generation = nil
				}
				if util.IsPodUpdated(pod, hash, generation) {
					updatedNumberScheduled++
				}
			}
@@ -1075,7 +1084,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet,
	}
	numberUnavailable := desiredNumberScheduled - numberAvailable

	err = storeDaemonSetStatus(dsc.kubeClient.ExtensionsV1beta1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
	err = storeDaemonSetStatus(dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable)
	if err != nil {
		return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
	}
@@ -1122,7 +1131,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
	if err != nil {
		return fmt.Errorf("failed to construct revisions of DaemonSet: %v", err)
	}
	hash := cur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
	hash := cur.Labels[apps.DefaultDaemonSetUniqueLabelKey]

	if ds.DeletionTimestamp != nil || !dsc.expectations.SatisfiedExpectations(dsKey) {
		// Only update status.
@@ -1137,8 +1146,8 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
	// Process rolling updates if we're ready.
	if dsc.expectations.SatisfiedExpectations(dsKey) {
		switch ds.Spec.UpdateStrategy.Type {
		case extensions.OnDeleteDaemonSetStrategyType:
		case extensions.RollingUpdateDaemonSetStrategyType:
		case apps.OnDeleteDaemonSetStrategyType:
		case apps.RollingUpdateDaemonSetStrategyType:
			err = dsc.rollingUpdate(ds, hash)
		}
		if err != nil {
@@ -1154,7 +1163,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
	return dsc.updateDaemonSetStatus(ds, hash)
}

func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *extensions.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *apps.DaemonSet) ([]algorithm.PredicateFailureReason, *schedulercache.NodeInfo, error) {
	// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
	// Add infinite toleration for taint notReady:NoExecute here
	// to survive taint-based eviction enforced by NodeController
@@ -1240,7 +1249,7 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *ext
// * shouldContinueRunning:
// Returns true when a daemonset should continue running on a node if a daemonset pod is already
// running on that node.
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *apps.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
	newPod := NewPod(ds, node.Name)

	// Because these bools require an && of all their required conditions, we start
@@ -1325,7 +1334,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
}

// NewPod creates a new pod
func NewPod(ds *extensions.DaemonSet, nodeName string) *v1.Pod {
func NewPod(ds *apps.DaemonSet, nodeName string) *v1.Pod {
	newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
	newPod.Namespace = ds.Namespace
	newPod.Spec.NodeName = nodeName
@@ -1363,7 +1372,7 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
}

// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []*extensions.DaemonSet
type byCreationTimestamp []*apps.DaemonSet

func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@ -24,8 +24,8 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
@ -40,7 +40,6 @@ import (
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
@ -83,7 +82,7 @@ var (
|
||||
}}
|
||||
)
|
||||
|
||||
func getKey(ds *extensions.DaemonSet, t *testing.T) string {
|
||||
func getKey(ds *apps.DaemonSet, t *testing.T) string {
|
||||
key, err := controller.KeyFunc(ds)
|
||||
|
||||
if err != nil {
|
||||
@ -92,19 +91,18 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string {
|
||||
return key
|
||||
}
|
||||
|
||||
func newDaemonSet(name string) *extensions.DaemonSet {
|
||||
func newDaemonSet(name string) *apps.DaemonSet {
|
||||
two := int32(2)
|
||||
return &extensions.DaemonSet{
|
||||
TypeMeta: metav1.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
|
||||
return &apps.DaemonSet{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: uuid.NewUUID(),
|
||||
Name: name,
|
||||
Namespace: metav1.NamespaceDefault,
|
||||
},
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
RevisionHistoryLimit: &two,
|
||||
UpdateStrategy: extensions.DaemonSetUpdateStrategy{
|
||||
Type: extensions.OnDeleteDaemonSetStrategyType,
|
||||
UpdateStrategy: apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.OnDeleteDaemonSetStrategyType,
|
||||
},
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
@ -127,22 +125,22 @@ func newDaemonSet(name string) *extensions.DaemonSet {
|
||||
}
|
||||
}
|
||||
|
||||
func newRollbackStrategy() *extensions.DaemonSetUpdateStrategy {
|
||||
func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
|
||||
one := intstr.FromInt(1)
|
||||
return &extensions.DaemonSetUpdateStrategy{
|
||||
Type: extensions.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &extensions.RollingUpdateDaemonSet{MaxUnavailable: &one},
|
||||
return &apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.RollingUpdateDaemonSetStrategyType,
|
||||
RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
|
||||
}
|
||||
}
|
||||
|
||||
func newOnDeleteStrategy() *extensions.DaemonSetUpdateStrategy {
|
||||
return &extensions.DaemonSetUpdateStrategy{
|
||||
Type: extensions.OnDeleteDaemonSetStrategyType,
|
||||
func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
|
||||
return &apps.DaemonSetUpdateStrategy{
|
||||
Type: apps.OnDeleteDaemonSetStrategyType,
|
||||
}
|
||||
}
|
||||
|
||||
func updateStrategies() []*extensions.DaemonSetUpdateStrategy {
|
||||
return []*extensions.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
|
||||
func updateStrategies() []*apps.DaemonSetUpdateStrategy {
|
||||
return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
|
||||
}
|
||||
|
||||
func newNode(name string, label map[string]string) *v1.Node {
|
||||
@ -170,14 +168,14 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
|
||||
}
|
||||
}
|
||||
|
||||
func newPod(podName string, nodeName string, label map[string]string, ds *extensions.DaemonSet) *v1.Pod {
|
||||
func newPod(podName string, nodeName string, label map[string]string, ds *apps.DaemonSet) *v1.Pod {
|
||||
// Add hash unique label to the pod
|
||||
newLabels := label
|
||||
var podSpec v1.PodSpec
|
||||
// Copy pod spec from DaemonSet template, or use a default one if DaemonSet is nil
|
||||
if ds != nil {
|
||||
hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
|
||||
newLabels = labelsutil.CloneAndAddLabel(label, extensions.DefaultDaemonSetUniqueLabelKey, hash)
|
||||
newLabels = labelsutil.CloneAndAddLabel(label, apps.DefaultDaemonSetUniqueLabelKey, hash)
|
||||
podSpec = ds.Spec.Template.Spec
|
||||
} else {
|
||||
podSpec = v1.PodSpec{
|
||||
@ -212,14 +210,14 @@ func newPod(podName string, nodeName string, label map[string]string, ds *extens
|
||||
return pod
|
||||
}
|
||||
|
||||
func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
|
||||
func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
|
||||
for i := 0; i < number; i++ {
|
||||
pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
|
||||
podStore.Add(pod)
|
||||
}
|
||||
}
|
||||
|
||||
func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *extensions.DaemonSet, number int) {
|
||||
func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
|
||||
for i := 0; i < number; i++ {
|
||||
pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
|
||||
pod.Status = v1.PodStatus{Phase: v1.PodFailed}
|
||||
@ -299,8 +297,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
|
||||
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
|
||||
|
||||
dsc, err := NewDaemonSetsController(
|
||||
informerFactory.Extensions().V1beta1().DaemonSets(),
|
||||
informerFactory.Apps().V1beta1().ControllerRevisions(),
|
||||
informerFactory.Apps().V1().DaemonSets(),
|
||||
informerFactory.Apps().V1().ControllerRevisions(),
|
||||
informerFactory.Core().V1().Pods(),
|
||||
informerFactory.Core().V1().Nodes(),
|
||||
clientset,
|
||||
@ -322,8 +320,8 @@ func newTestController(initialObjects ...runtime.Object) (*daemonSetsController,
|
||||
|
||||
return &daemonSetsController{
|
||||
dsc,
|
||||
informerFactory.Extensions().V1beta1().DaemonSets().Informer().GetStore(),
|
||||
informerFactory.Apps().V1beta1().ControllerRevisions().Informer().GetStore(),
|
||||
informerFactory.Apps().V1().DaemonSets().Informer().GetStore(),
|
||||
informerFactory.Apps().V1().ControllerRevisions().Informer().GetStore(),
|
||||
informerFactory.Core().V1().Pods().Informer().GetStore(),
|
||||
informerFactory.Core().V1().Nodes().Informer().GetStore(),
|
||||
fakeRecorder,
|
||||
@ -346,7 +344,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod
|
||||
}
|
||||
// Make sure the ControllerRefs are correct.
|
||||
for _, controllerRef := range fakePodControl.ControllerRefs {
|
||||
if got, want := controllerRef.APIVersion, "extensions/v1beta1"; got != want {
|
||||
if got, want := controllerRef.APIVersion, "apps/v1"; got != want {
|
||||
t.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
|
||||
}
|
||||
if got, want := controllerRef.Kind, "DaemonSet"; got != want {
|
||||
@ -358,7 +356,7 @@ func validateSyncDaemonSets(t *testing.T, manager *daemonSetsController, fakePod
|
||||
}
|
||||
}
|
||||
|
||||
func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
|
||||
func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
|
||||
key, err := controller.KeyFunc(ds)
|
||||
if err != nil {
|
||||
t.Errorf("Could not get key for daemon.")
|
||||
@ -368,7 +366,7 @@ func syncAndValidateDaemonSets(t *testing.T, manager *daemonSetsController, ds *
|
||||
}
|
||||
|
||||
// clearExpectations copies the FakePodControl to PodStore and clears the create and delete expectations.
|
||||
func clearExpectations(t *testing.T, manager *daemonSetsController, ds *extensions.DaemonSet, fakePodControl *fakePodControl) {
|
||||
func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, fakePodControl *fakePodControl) {
|
||||
fakePodControl.Clear()
|
||||
|
||||
key, err := controller.KeyFunc(ds)
|
||||
@ -459,13 +457,13 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
|
||||
var updated *extensions.DaemonSet
|
||||
var updated *apps.DaemonSet
|
||||
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetSubresource() != "status" {
|
||||
return false, nil, nil
|
||||
}
|
||||
if u, ok := action.(core.UpdateAction); ok {
|
||||
updated = u.GetObject().(*extensions.DaemonSet)
|
||||
updated = u.GetObject().(*apps.DaemonSet)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
@ -585,9 +583,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
|
||||
})
|
||||
manager.dsStore.Add(ds)
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -615,9 +613,9 @@ func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T)
|
||||
})
|
||||
manager.dsStore.Add(ds)
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -1123,13 +1121,13 @@ func TestNumberReadyStatus(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
var updated *extensions.DaemonSet
|
||||
var updated *apps.DaemonSet
|
||||
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetSubresource() != "status" {
|
||||
return false, nil, nil
|
||||
}
|
||||
if u, ok := action.(core.UpdateAction); ok {
|
||||
updated = u.GetObject().(*extensions.DaemonSet)
|
||||
updated = u.GetObject().(*apps.DaemonSet)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
@ -1166,13 +1164,13 @@ func TestObservedGeneration(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("error creating DaemonSets controller: %v", err)
|
||||
}
|
||||
var updated *extensions.DaemonSet
|
||||
var updated *apps.DaemonSet
|
||||
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
if action.GetSubresource() != "status" {
|
||||
return false, nil, nil
|
||||
}
|
||||
if u, ok := action.(core.UpdateAction); ok {
|
||||
updated = u.GetObject().(*extensions.DaemonSet)
|
||||
updated = u.GetObject().(*apps.DaemonSet)
|
||||
}
|
||||
return false, nil, nil
|
||||
})
|
||||
@ -1385,7 +1383,7 @@ func setNodeTaint(node *v1.Node, taints []v1.Taint) {
|
||||
node.Spec.Taints = taints
|
||||
}
|
||||
|
||||
func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleration) {
|
||||
func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
|
||||
ds.Spec.Template.Spec.Tolerations = tolerations
|
||||
}
|
||||
|
||||
@ -1482,9 +1480,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
|
||||
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
|
||||
manager.dsStore.Add(ds)
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -1493,9 +1491,9 @@ func TestInsufficientCapacityNodeDaemonLaunchesCriticalPod(t *testing.T) {
|
||||
// Enabling critical pod annotation feature gate should create critical pod
|
||||
utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
@ -1534,7 +1532,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchCriticalPod(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func setDaemonSetCritical(ds *extensions.DaemonSet) {
|
||||
func setDaemonSetCritical(ds *apps.DaemonSet) {
|
||||
ds.Namespace = api.NamespaceSystem
|
||||
if ds.Spec.Template.ObjectMeta.Annotations == nil {
|
||||
ds.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
|
||||
@ -1547,14 +1545,14 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
predicateName string
|
||||
podsOnNode []*v1.Pod
|
||||
nodeCondition []v1.NodeCondition
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
wantToRun, shouldSchedule, shouldContinueRunning bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1570,8 +1568,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
predicateName: "InsufficientResourceError",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1587,8 +1585,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
predicateName: "ErrPodNotMatchHostName",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1615,8 +1613,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1650,8 +1648,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1679,8 +1677,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1696,8 +1694,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
predicateName: "ErrNodeSelectorNotMatch",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1715,8 +1713,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1734,8 +1732,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
predicateName: "ErrPodAffinityNotMatch",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1769,8 +1767,8 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
|
||||
},
|
||||
{
|
||||
predicateName: "ShouldRunDaemonPod",
|
||||
ds: &extensions.DaemonSet{
|
||||
Spec: extensions.DaemonSetSpec{
|
||||
ds: &apps.DaemonSet{
|
||||
Spec: apps.DaemonSetSpec{
|
||||
Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
@ -1845,14 +1843,14 @@ func TestUpdateNode(t *testing.T) {
|
||||
test string
|
||||
newNode *v1.Node
|
||||
oldNode *v1.Node
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
shouldEnqueue bool
|
||||
}{
|
||||
{
|
||||
test: "Nothing changed, should not enqueue",
|
||||
oldNode: newNode("node1", nil),
|
||||
newNode: newNode("node1", nil),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
||||
return ds
|
||||
@ -1863,7 +1861,7 @@ func TestUpdateNode(t *testing.T) {
|
||||
test: "Node labels changed",
|
||||
oldNode: newNode("node1", nil),
|
||||
newNode: newNode("node1", simpleNodeLabel),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
|
||||
return ds
|
||||
@ -1893,7 +1891,7 @@ func TestUpdateNode(t *testing.T) {
|
||||
manager.dsStore.Add(c.ds)
|
||||
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 0)
|
||||
|
||||
manager.enqueueDaemonSet = func(ds *extensions.DaemonSet) {
|
||||
manager.enqueueDaemonSet = func(ds *apps.DaemonSet) {
|
||||
if ds.Name == "ds" {
|
||||
enqueued = true
|
||||
}
|
||||
@ -1917,7 +1915,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
node *v1.Node
|
||||
existPods []*v1.Pod
|
||||
deletedPod *v1.Pod
|
||||
ds *extensions.DaemonSet
|
||||
ds *apps.DaemonSet
|
||||
shouldEnqueue bool
|
||||
}{
|
||||
{
|
||||
@ -1952,7 +1950,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
Spec: podSpec,
|
||||
}
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
|
||||
return ds
|
||||
@ -1997,7 +1995,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
Spec: podSpec,
|
||||
}
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
|
||||
return ds
|
||||
@ -2039,7 +2037,7 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
Spec: podSpec,
|
||||
}
|
||||
}(),
|
||||
ds: func() *extensions.DaemonSet {
|
||||
ds: func() *apps.DaemonSet {
|
||||
ds := newDaemonSet("ds")
|
||||
ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
|
||||
return ds
|
||||
@ -2061,15 +2059,15 @@ func TestDeleteNoDaemonPod(t *testing.T) {
|
||||
manager.podStore.Add(pod)
|
||||
}
|
||||
switch strategy.Type {
|
||||
case extensions.OnDeleteDaemonSetStrategyType:
|
||||
case apps.OnDeleteDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 2)
|
||||
case extensions.RollingUpdateDaemonSetStrategyType:
|
||||
case apps.RollingUpdateDaemonSetStrategyType:
|
||||
syncAndValidateDaemonSets(t, manager, c.ds, podControl, 0, 0, 3)
|
||||
default:
|
||||
t.Fatalf("unexpected UpdateStrategy %+v", strategy)
|
||||
}
|
||||
|
||||
manager.enqueueDaemonSetRateLimited = func(ds *extensions.DaemonSet) {
|
||||
manager.enqueueDaemonSetRateLimited = func(ds *apps.DaemonSet) {
|
||||
if ds.Name == "ds" {
|
||||
enqueued = true
|
||||
}
|
||||
|
@ -23,9 +23,8 @@ import (
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
||||
apps "k8s.io/api/apps/v1beta1"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
"k8s.io/api/core/v1"
|
||||
extensions "k8s.io/api/extensions/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@ -41,7 +40,7 @@ import (
|
||||
|
||||
// rollingUpdate deletes old daemon set pods making sure that no more than
|
||||
// ds.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable pods are unavailable
|
||||
func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash string) error {
|
||||
func (dsc *DaemonSetsController) rollingUpdate(ds *apps.DaemonSet, hash string) error {
|
||||
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
@ -82,7 +81,7 @@ func (dsc *DaemonSetsController) rollingUpdate(ds *extensions.DaemonSet, hash st
|
||||
// constructHistory finds all histories controlled by the given DaemonSet, and
|
||||
// update current history revision number, or create current history if need to.
|
||||
// It also deduplicates current history, and adds missing unique labels to existing histories.
|
||||
func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
|
||||
func (dsc *DaemonSetsController) constructHistory(ds *apps.DaemonSet) (cur *apps.ControllerRevision, old []*apps.ControllerRevision, err error) {
|
||||
var histories []*apps.ControllerRevision
|
||||
var currentHistories []*apps.ControllerRevision
|
||||
histories, err = dsc.controlledHistories(ds)
|
||||
@ -92,10 +91,10 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
|
||||
for _, history := range histories {
|
||||
// Add the unique label if it's not already added to the history
|
||||
// We use history name instead of computing hash, so that we don't need to worry about hash collision
|
||||
if _, ok := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; !ok {
|
||||
if _, ok := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; !ok {
|
||||
toUpdate := history.DeepCopy()
|
||||
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
|
||||
history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = toUpdate.Name
|
||||
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -130,7 +129,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
|
||||
if cur.Revision < currRevision {
|
||||
toUpdate := cur.DeepCopy()
|
||||
toUpdate.Revision = currRevision
|
||||
_, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
_, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@ -139,7 +138,7 @@ func (dsc *DaemonSetsController) constructHistory(ds *extensions.DaemonSet) (cur
|
||||
return cur, old, err
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []*apps.ControllerRevision) error {
|
||||
func (dsc *DaemonSetsController) cleanupHistory(ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
|
||||
nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
|
||||
@ -155,7 +154,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []
|
||||
liveHashes := make(map[string]bool)
|
||||
for _, pods := range nodesToDaemonPods {
|
||||
for _, pod := range pods {
|
||||
if hash := pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
|
||||
if hash := pod.Labels[apps.DefaultDaemonSetUniqueLabelKey]; len(hash) > 0 {
|
||||
liveHashes[hash] = true
|
||||
}
|
||||
}
|
||||
@ -164,7 +163,7 @@ func (dsc *DaemonSetsController) cleanupHistory(ds *extensions.DaemonSet, old []
|
||||
// Find all live history with the above hashes
|
||||
liveHistory := make(map[string]bool)
|
||||
for _, history := range old {
|
||||
if hash := history.Labels[extensions.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
|
||||
if hash := history.Labels[apps.DefaultDaemonSetUniqueLabelKey]; liveHashes[hash] {
|
||||
liveHistory[history.Name] = true
|
||||
}
|
||||
}
|
||||
@ -199,7 +198,7 @@ func maxRevision(histories []*apps.ControllerRevision) int64 {
|
||||
return max
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
|
||||
func (dsc *DaemonSetsController) dedupCurHistories(ds *apps.DaemonSet, curHistories []*apps.ControllerRevision) (*apps.ControllerRevision, error) {
|
||||
if len(curHistories) == 1 {
|
||||
return curHistories[0], nil
|
||||
}
|
||||
@ -222,12 +221,12 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
|
||||
return nil, err
|
||||
}
|
||||
for _, pod := range pods {
|
||||
if pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey] {
|
||||
if pod.Labels[apps.DefaultDaemonSetUniqueLabelKey] != keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey] {
|
||||
toUpdate := pod.DeepCopy()
|
||||
if toUpdate.Labels == nil {
|
||||
toUpdate.Labels = make(map[string]string)
|
||||
}
|
||||
toUpdate.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
|
||||
toUpdate.Labels[apps.DefaultDaemonSetUniqueLabelKey] = keepCur.Labels[apps.DefaultDaemonSetUniqueLabelKey]
|
||||
_, err = dsc.kubeClient.CoreV1().Pods(ds.Namespace).Update(toUpdate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -247,7 +246,7 @@ func (dsc *DaemonSetsController) dedupCurHistories(ds *extensions.DaemonSet, cur
|
||||
// This also reconciles ControllerRef by adopting/orphaning.
|
||||
// Note that returned histories are pointers to objects in the cache.
|
||||
// If you want to modify one, you need to deep-copy it first.
|
||||
func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) ([]*apps.ControllerRevision, error) {
|
||||
func (dsc *DaemonSetsController) controlledHistories(ds *apps.DaemonSet) ([]*apps.ControllerRevision, error) {
|
||||
selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -277,7 +276,7 @@ func (dsc *DaemonSetsController) controlledHistories(ds *extensions.DaemonSet) (
|
||||
}
|
||||
|
||||
// Match check if the given DaemonSet's template matches the template stored in the given history.
|
||||
func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, error) {
|
||||
func Match(ds *apps.DaemonSet, history *apps.ControllerRevision) (bool, error) {
|
||||
patch, err := getPatch(ds)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@ -289,7 +288,7 @@ func Match(ds *extensions.DaemonSet, history *apps.ControllerRevision) (bool, er
|
||||
// previous version. If the returned error is nil the patch is valid. The current state that we save is just the
|
||||
// PodSpecTemplate. We can modify this later to encompass more state (or less) and remain compatible with previously
|
||||
// recorded patches.
|
||||
func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
|
||||
func getPatch(ds *apps.DaemonSet) ([]byte, error) {
|
||||
dsBytes, err := json.Marshal(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -312,7 +311,7 @@ func getPatch(ds *extensions.DaemonSet) ([]byte, error) {
|
||||
return patch, err
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
|
||||
func (dsc *DaemonSetsController) snapshot(ds *apps.DaemonSet, revision int64) (*apps.ControllerRevision, error) {
|
||||
patch, err := getPatch(ds)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -323,7 +322,7 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: ds.Namespace,
|
||||
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, extensions.DefaultDaemonSetUniqueLabelKey, hash),
|
||||
Labels: labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, hash),
|
||||
Annotations: ds.Annotations,
|
||||
OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)},
|
||||
},
|
||||
@ -331,10 +330,10 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
|
||||
Revision: revision,
|
||||
}
|
||||
|
||||
history, err = dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Create(history)
|
||||
history, err = dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Create(history)
|
||||
if errors.IsAlreadyExists(err) {
|
||||
// TODO: Is it okay to get from historyLister?
|
||||
existedHistory, getErr := dsc.kubeClient.AppsV1beta1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
|
||||
existedHistory, getErr := dsc.kubeClient.AppsV1().ControllerRevisions(ds.Namespace).Get(name, metav1.GetOptions{})
|
||||
if getErr != nil {
|
||||
return nil, getErr
|
||||
}
|
||||
@ -367,13 +366,19 @@ func (dsc *DaemonSetsController) snapshot(ds *extensions.DaemonSet, revision int
|
||||
return history, err
|
||||
}
|
||||
|
||||
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
|
||||
func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod, hash string) ([]*v1.Pod, []*v1.Pod) {
|
||||
var newPods []*v1.Pod
|
||||
var oldPods []*v1.Pod
|
||||
|
||||
for _, pods := range nodeToDaemonPods {
|
||||
for _, pod := range pods {
|
||||
if util.IsPodUpdated(ds.Spec.TemplateGeneration, pod, hash) {
|
||||
// If the returned error is not nil we have a parse error.
|
||||
// The controller handles this via the hash.
|
||||
generation, err := util.GetTemplateGeneration(ds)
|
||||
if err != nil {
|
||||
generation = nil
|
||||
}
|
||||
if util.IsPodUpdated(pod, hash, generation) {
|
||||
newPods = append(newPods, pod)
|
||||
} else {
|
||||
oldPods = append(oldPods, pod)
|
||||
@ -383,7 +388,7 @@ func (dsc *DaemonSetsController) getAllDaemonSetPods(ds *extensions.DaemonSet, n
|
||||
return newPods, oldPods
|
||||
}

func (dsc *DaemonSetsController) getUnavailableNumbers(ds *extensions.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
func (dsc *DaemonSetsController) getUnavailableNumbers(ds *apps.DaemonSet, nodeToDaemonPods map[string][]*v1.Pod) (int, int, error) {
	glog.V(4).Infof("Getting unavailable numbers")
	// TODO: get nodeList once in syncDaemonSet and pass it to other functions
	nodeList, err := dsc.nodeLister.List(labels.Everything())

@ -19,8 +19,8 @@ package daemon
import (
	"testing"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)
@ -38,10 +38,9 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
	markPodsReady(podControl.podStore)

	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt(maxUnavailable)
	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	ds.Spec.TemplateGeneration++
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	manager.dsStore.Update(ds)

	clearExpectations(t, manager, ds, podControl)
@ -80,10 +79,9 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
	markPodsReady(podControl.podStore)

	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt(maxUnavailable)
	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	ds.Spec.TemplateGeneration++
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	manager.dsStore.Update(ds)

	// new pods are not ready numUnavailable == maxUnavailable
@ -109,10 +107,9 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt(maxUnavailable)
	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	ds.Spec.TemplateGeneration++
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	manager.dsStore.Update(ds)

	// all old pods are unavailable so should be removed
@ -137,9 +134,9 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
	manager.dsStore.Add(ds)
	syncAndValidateDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	ds.Spec.UpdateStrategy.Type = extensions.RollingUpdateDaemonSetStrategyType
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt(maxUnavailable)
	ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	manager.dsStore.Update(ds)

	// template is not changed no pod should be removed
@ -152,7 +149,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
	cases := []struct {
		name           string
		Manager        *daemonSetsController
		ds             *extensions.DaemonSet
		ds             *apps.DaemonSet
		nodeToPods     map[string][]*v1.Pod
		maxUnavailable int
		numUnavailable int
@ -167,10 +164,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
				}
				return manager
			}(),
			ds: func() *extensions.DaemonSet {
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				intStr := intstr.FromInt(0)
				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				return ds
			}(),
			nodeToPods: make(map[string][]*v1.Pod),
@ -187,10 +184,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			}(),
			ds: func() *extensions.DaemonSet {
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				intStr := intstr.FromInt(1)
				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
@ -216,10 +213,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			}(),
			ds: func() *extensions.DaemonSet {
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				intStr := intstr.FromInt(0)
				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
@ -242,10 +239,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			}(),
			ds: func() *extensions.DaemonSet {
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				intStr := intstr.FromString("50%")
				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
@ -271,10 +268,10 @@ func TestGetUnavailableNumbers(t *testing.T) {
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			}(),
			ds: func() *extensions.DaemonSet {
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				intStr := intstr.FromString("50%")
				ds.Spec.UpdateStrategy.RollingUpdate = &extensions.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
@ -18,7 +18,9 @@ package util

import (
	"fmt"
	"strconv"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	extensions "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -28,13 +30,28 @@ import (
	"k8s.io/kubernetes/pkg/features"
	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	labelsutil "k8s.io/kubernetes/pkg/util/labels"
)

// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
// deprecated annotation. If no annotation is found, nil is returned. If the annotation is found but fails to parse,
// nil is returned along with the parse error. If the generation can be parsed from the annotation, a pointer to the
// parsed int64 value is returned.
func GetTemplateGeneration(ds *apps.DaemonSet) (*int64, error) {
	annotation, found := ds.Annotations[apps.DeprecatedTemplateGeneration]
	if !found {
		return nil, nil
	}
	generation, err := strconv.ParseInt(annotation, 10, 64)
	if err != nil {
		return nil, err
	}
	return &generation, nil
}
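Callers of GetTemplateGeneration therefore see three outcomes: (nil, nil) when the annotation is absent, (nil, err) when it cannot be parsed, and a pointer to the value otherwise. For example (annotation value illustrative):

ds := &apps.DaemonSet{ObjectMeta: metav1.ObjectMeta{
	Annotations: map[string]string{apps.DeprecatedTemplateGeneration: "7"},
}}
gen, err := GetTemplateGeneration(ds) // *gen == 7, err == nil

ds.Annotations[apps.DeprecatedTemplateGeneration] = "not-a-number"
gen, err = GetTemplateGeneration(ds) // gen == nil, err is the strconv parse error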

// CreatePodTemplate returns a copy of the provided template with an added
// label carrying the templateGeneration (for backward compatibility) and the
// hash of the provided template, and with the default daemon tolerations set.
func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash string) v1.PodTemplateSpec {
func CreatePodTemplate(template v1.PodTemplateSpec, generation *int64, hash string) v1.PodTemplateSpec {
	newTemplate := *template.DeepCopy()
	// DaemonSet pods shouldn't be deleted by NodeController in case of node problems.
	// Add infinite toleration for taint notReady:NoExecute here
@ -81,12 +98,12 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
		})
	}

	templateGenerationStr := fmt.Sprint(generation)
	newTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
		template.ObjectMeta.Labels,
		extensions.DaemonSetTemplateGenerationKey,
		templateGenerationStr,
	)
	if newTemplate.ObjectMeta.Labels == nil {
		newTemplate.ObjectMeta.Labels = make(map[string]string)
	}
	if generation != nil {
		newTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey] = fmt.Sprint(*generation)
	}
	// TODO: do we need to validate if the DaemonSet is RollingUpdate or not?
	if len(hash) > 0 {
		newTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey] = hash
@ -94,10 +111,11 @@ func CreatePodTemplate(template v1.PodTemplateSpec, generation int64, hash strin
	return newTemplate
}
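Because generation is now a pointer, the backward-compatibility label is attached only when a legacy template generation is actually known; otherwise the pod template carries just the hash label. A usage sketch (values illustrative):

gen := int64(3)
withGen := CreatePodTemplate(v1.PodTemplateSpec{}, &gen, "abc123")
// withGen.Labels[extensions.DaemonSetTemplateGenerationKey] == "3"
// withGen.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == "abc123"

noGen := CreatePodTemplate(v1.PodTemplateSpec{}, nil, "abc123")
// noGen carries only the hash label; no generation label is added.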

// IsPodUpdated checks if pod contains label value that either matches templateGeneration or hash
func IsPodUpdated(dsTemplateGeneration int64, pod *v1.Pod, hash string) bool {
func IsPodUpdated(pod *v1.Pod, hash string, dsTemplateGeneration *int64) bool {
	// Compare with hash to see if the pod is updated, need to maintain backward compatibility of templateGeneration
	templateMatches := pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(dsTemplateGeneration)
	templateMatches := dsTemplateGeneration != nil &&
		pod.Labels[extensions.DaemonSetTemplateGenerationKey] == fmt.Sprint(*dsTemplateGeneration)
	hashMatches := len(hash) > 0 && pod.Labels[extensions.DefaultDaemonSetUniqueLabelKey] == hash
	return hashMatches || templateMatches
}
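CreatePodTemplate and IsPodUpdated are two halves of one contract: the label values written by the former are exactly what the latter compares. A round-trip sketch (hash values illustrative):

gen := int64(5)
tpl := CreatePodTemplate(v1.PodTemplateSpec{}, &gen, "hash-1")
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: tpl.Labels}}

IsPodUpdated(pod, "hash-1", &gen) // true: hash and generation both match
IsPodUpdated(pod, "hash-2", &gen) // true: hash differs, generation label matches
IsPodUpdated(pod, "hash-2", nil)  // false: no generation to compare, hash differs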

@ -47,13 +47,14 @@ func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
}

func TestIsPodUpdated(t *testing.T) {
	templateGeneration := int64(12345)
	templateGeneration := int64Ptr(12345)
	badGeneration := int64Ptr(12350)
	hash := "55555"
	labels := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(*templateGeneration), extensions.DefaultDaemonSetUniqueLabelKey: hash}
	labelsNoHash := map[string]string{extensions.DaemonSetTemplateGenerationKey: fmt.Sprint(*templateGeneration)}
	tests := []struct {
		test               string
		templateGeneration int64
		templateGeneration *int64
		pod                *v1.Pod
		hash               string
		isUpdated          bool
@ -95,14 +96,14 @@ func TestIsPodUpdated(t *testing.T) {
		},
		{
			"templateGeneration doesn't match, hash does",
			templateGeneration + 1,
			badGeneration,
			newPod("pod1", "node1", labels),
			hash,
			true,
		},
		{
			"templateGeneration and hash don't match",
			templateGeneration + 1,
			badGeneration,
			newPod("pod1", "node1", labels),
			hash + "123",
			false,
@ -130,7 +131,7 @@ func TestIsPodUpdated(t *testing.T) {
		},
	}
	for _, test := range tests {
		updated := IsPodUpdated(test.templateGeneration, test.pod, test.hash)
		updated := IsPodUpdated(test.pod, test.hash, test.templateGeneration)
		if updated != test.isUpdated {
			t.Errorf("%s: IsPodUpdated returned wrong value. Expected %t, got %t", test.test, test.isUpdated, updated)
		}
@ -139,19 +140,19 @@ func TestIsPodUpdated(t *testing.T) {

func TestCreatePodTemplate(t *testing.T) {
	tests := []struct {
		templateGeneration int64
		templateGeneration *int64
		hash               string
		expectUniqueLabel  bool
	}{
		{int64(1), "", false},
		{int64(2), "3242341807", true},
		{int64Ptr(1), "", false},
		{int64Ptr(2), "3242341807", true},
	}
	for _, test := range tests {
		podTemplateSpec := v1.PodTemplateSpec{}
		newPodTemplate := CreatePodTemplate(podTemplateSpec, test.templateGeneration, test.hash)
		val, exists := newPodTemplate.ObjectMeta.Labels[extensions.DaemonSetTemplateGenerationKey]
		if !exists || val != fmt.Sprint(test.templateGeneration) {
			t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", test.templateGeneration, val)
		if !exists || val != fmt.Sprint(*test.templateGeneration) {
			t.Errorf("Expected podTemplateSpec to have generation label value: %d, got: %s", *test.templateGeneration, val)
		}
		val, exists = newPodTemplate.ObjectMeta.Labels[extensions.DefaultDaemonSetUniqueLabelKey]
		if test.expectUniqueLabel && (!exists || val != test.hash) {
@ -162,3 +163,8 @@ func TestCreatePodTemplate(t *testing.T) {
		}
	}
}

func int64Ptr(i int) *int64 {
	li := int64(i)
	return &li
}