experimental. -> extensions.
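What follows is the raw diff; the change is a mechanical package rename, so every reference to the experimental API group's types (DaemonSet, Deployment, Job, HorizontalPodAutoscaler, Scale, ResourceConsumption) now goes through the extensions package instead. As a minimal sketch of the pattern, assuming the import paths of the Kubernetes tree of that era (pkg/apis/experimental before, pkg/apis/extensions after), callers only swap the package qualifier; the snippet below is illustrative and not part of the commit:

package main

import (
	"fmt"

	// Assumed old path, dropped by this kind of change: "k8s.io/kubernetes/pkg/apis/experimental"
	// Assumed new path used after the rename:
	"k8s.io/kubernetes/pkg/apis/extensions"
)

func main() {
	// Where code previously built &experimental.DaemonSet{}, it now builds
	// &extensions.DaemonSet{}; the type itself is unchanged.
	ds := &extensions.DaemonSet{}
	fmt.Printf("constructed %T\n", ds)
}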
@@ -103,22 +103,22 @@ func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controlle
return dsc.kubeClient.Experimental().DaemonSets(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
},
},
-&experimental.DaemonSet{},
+&extensions.DaemonSet{},
// TODO: Can we have much longer period here?
FullDaemonSetResyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
-ds := obj.(*experimental.DaemonSet)
+ds := obj.(*extensions.DaemonSet)
glog.V(4).Infof("Adding daemon set %s", ds.Name)
dsc.enqueueDaemonSet(obj)
},
UpdateFunc: func(old, cur interface{}) {
-oldDS := old.(*experimental.DaemonSet)
+oldDS := old.(*extensions.DaemonSet)
glog.V(4).Infof("Updating daemon set %s", oldDS.Name)
dsc.enqueueDaemonSet(cur)
},
DeleteFunc: func(obj interface{}) {
-ds := obj.(*experimental.DaemonSet)
+ds := obj.(*extensions.DaemonSet)
glog.V(4).Infof("Deleting daemon set %s", ds.Name)
dsc.enqueueDaemonSet(obj)
},
@@ -218,7 +218,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSet(obj interface{}) {
dsc.queue.Add(key)
}

-func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *experimental.DaemonSet {
+func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.DaemonSet {
sets, err := dsc.dsStore.GetPodDaemonSets(pod)
if err != nil {
glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name)
@@ -321,7 +321,7 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
}

// getNodesToDaemonSetPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
-func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *experimental.DaemonSet) (map[string][]*api.Pod, error) {
+func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*api.Pod, error) {
nodeToDaemonPods := make(map[string][]*api.Pod)
daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(labels.Set(ds.Spec.Selector).AsSelector())
if err != nil {
@@ -334,7 +334,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *experimental.DaemonSet
return nodeToDaemonPods, nil
}

-func (dsc *DaemonSetsController) manage(ds *experimental.DaemonSet) {
+func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
// Find out which nodes are running the daemon pods selected by ds.
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
@@ -401,7 +401,7 @@ func (dsc *DaemonSetsController) manage(ds *experimental.DaemonSet) {
}
}

-func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *experimental.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
+func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled {
return nil
}
@@ -426,7 +426,7 @@ func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *experimental.D
return updateErr
}

-func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *experimental.DaemonSet) {
+func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet) {
glog.V(4).Infof("Updating daemon set status")
nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ds)
if err != nil {
@@ -482,7 +482,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
dsc.expectations.DeleteExpectations(key)
return nil
}
-ds := obj.(*experimental.DaemonSet)
+ds := obj.(*extensions.DaemonSet)
if !dsc.podStoreSynced() {
// Sleep so we give the pod reflector goroutine a chance to run.
time.Sleep(PodStoreSyncedPollPeriod)
@@ -509,7 +509,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
}

// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
-type byCreationTimestamp []experimental.DaemonSet
+type byCreationTimestamp []extensions.DaemonSet

func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

@@ -42,7 +42,7 @@ func init() {
api.ForTesting_ReferencesAllowBlankSelfLinks = true
}

-func getKey(ds *experimental.DaemonSet, t *testing.T) string {
+func getKey(ds *extensions.DaemonSet, t *testing.T) string {
if key, err := controller.KeyFunc(ds); err != nil {
t.Errorf("Unexpected error getting key for ds %v: %v", ds.Name, err)
return ""
@@ -51,14 +51,14 @@ func getKey(ds *experimental.DaemonSet, t *testing.T) string {
}
}

-func newDaemonSet(name string) *experimental.DaemonSet {
-return &experimental.DaemonSet{
+func newDaemonSet(name string) *extensions.DaemonSet {
+return &extensions.DaemonSet{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.Version()},
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
},
-Spec: experimental.DaemonSetSpec{
+Spec: extensions.DaemonSetSpec{
Selector: simpleDaemonSetLabel,
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
@@ -146,7 +146,7 @@ func validateSyncDaemonSets(t *testing.T, fakePodControl *controller.FakePodCont
}
}

-func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *experimental.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
+func syncAndValidateDaemonSets(t *testing.T, manager *DaemonSetsController, ds *extensions.DaemonSet, podControl *controller.FakePodControl, expectedCreates, expectedDeletes int) {
key, err := controller.KeyFunc(ds)
if err != nil {
t.Errorf("Could not get key for daemon.")

@@ -73,22 +73,22 @@ func (d *DeploymentController) reconcileDeployments() []error {
return errs
}

-func (d *DeploymentController) reconcileDeployment(deployment *experimental.Deployment) error {
+func (d *DeploymentController) reconcileDeployment(deployment *extensions.Deployment) error {
switch deployment.Spec.Strategy.Type {
-case experimental.RecreateDeploymentStrategyType:
+case extensions.RecreateDeploymentStrategyType:
return d.reconcileRecreateDeployment(*deployment)
-case experimental.RollingUpdateDeploymentStrategyType:
+case extensions.RollingUpdateDeploymentStrategyType:
return d.reconcileRollingUpdateDeployment(*deployment)
}
return fmt.Errorf("unexpected deployment strategy type: %s", deployment.Spec.Strategy.Type)
}

-func (d *DeploymentController) reconcileRecreateDeployment(deployment experimental.Deployment) error {
+func (d *DeploymentController) reconcileRecreateDeployment(deployment extensions.Deployment) error {
// TODO: implement me.
return nil
}

-func (d *DeploymentController) reconcileRollingUpdateDeployment(deployment experimental.Deployment) error {
+func (d *DeploymentController) reconcileRollingUpdateDeployment(deployment extensions.Deployment) error {
newRC, err := d.getNewRC(deployment)
if err != nil {
return err
@@ -124,13 +124,13 @@ func (d *DeploymentController) reconcileRollingUpdateDeployment(deployment exper
return nil
}

-func (d *DeploymentController) getOldRCs(deployment experimental.Deployment) ([]*api.ReplicationController, error) {
+func (d *DeploymentController) getOldRCs(deployment extensions.Deployment) ([]*api.ReplicationController, error) {
return deploymentUtil.GetOldRCs(deployment, d.client)
}

// Returns an RC that matches the intent of the given deployment.
// It creates a new RC if required.
-func (d *DeploymentController) getNewRC(deployment experimental.Deployment) (*api.ReplicationController, error) {
+func (d *DeploymentController) getNewRC(deployment extensions.Deployment) (*api.ReplicationController, error) {
existingNewRC, err := deploymentUtil.GetNewRC(deployment, d.client)
if err != nil || existingNewRC != nil {
return existingNewRC, err
@@ -158,7 +158,7 @@ func (d *DeploymentController) getNewRC(deployment experimental.Deployment) (*ap
return createdRC, nil
}

-func (d *DeploymentController) reconcileNewRC(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
+func (d *DeploymentController) reconcileNewRC(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) (bool, error) {
if newRC.Spec.Replicas == deployment.Spec.Replicas {
// Scaling not required.
return false, nil
@@ -192,7 +192,7 @@ func (d *DeploymentController) reconcileNewRC(allRCs []*api.ReplicationControlle
return true, err
}

-func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
+func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) (bool, error) {
oldPodsCount := deploymentUtil.GetReplicaCountForRCs(oldRCs)
if oldPodsCount == 0 {
// Cant scale down further
@@ -239,12 +239,12 @@ func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationControll
return true, err
}

-func (d *DeploymentController) updateDeploymentStatus(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) error {
+func (d *DeploymentController) updateDeploymentStatus(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) error {
totalReplicas := deploymentUtil.GetReplicaCountForRCs(allRCs)
updatedReplicas := deploymentUtil.GetReplicaCountForRCs([]*api.ReplicationController{newRC})
newDeployment := deployment
// TODO: Reconcile this with API definition. API definition talks about ready pods, while this just computes created pods.
-newDeployment.Status = experimental.DeploymentStatus{
+newDeployment.Status = extensions.DeploymentStatus{
Replicas: totalReplicas,
UpdatedReplicas: updatedReplicas,
}
@@ -252,7 +252,7 @@ func (d *DeploymentController) updateDeploymentStatus(allRCs []*api.ReplicationC
return err
}

-func (d *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationController, newScale int, deployment experimental.Deployment) (*api.ReplicationController, error) {
+func (d *DeploymentController) scaleRCAndRecordEvent(rc *api.ReplicationController, newScale int, deployment extensions.Deployment) (*api.ReplicationController, error) {
scalingOperation := "down"
if rc.Spec.Replicas < newScale {
scalingOperation = "up"
@@ -270,7 +270,7 @@ func (d *DeploymentController) scaleRC(rc *api.ReplicationController, newScale i
return d.client.ReplicationControllers(rc.ObjectMeta.Namespace).Update(rc)
}

-func (d *DeploymentController) updateDeployment(deployment *experimental.Deployment) (*experimental.Deployment, error) {
+func (d *DeploymentController) updateDeployment(deployment *extensions.Deployment) (*extensions.Deployment, error) {
// TODO: Using client for now, update to use store when it is ready.
return d.client.Experimental().Deployments(deployment.ObjectMeta.Namespace).Update(deployment)
}

@@ -45,7 +45,7 @@ type JobController struct {
podControl controller.PodControlInterface

// To allow injection of updateJobStatus for testing.
-updateHandler func(job *experimental.Job) error
+updateHandler func(job *extensions.Job) error
syncHandler func(jobKey string) error
// podStoreSynced returns true if the pod store has been synced at least once.
// Added as a member to the struct to allow injection for testing.
@@ -92,13 +92,13 @@ func NewJobController(kubeClient client.Interface, resyncPeriod controller.Resyn
return jm.kubeClient.Experimental().Jobs(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
},
},
-&experimental.Job{},
+&extensions.Job{},
// TODO: Can we have much longer period here?
replicationcontroller.FullControllerResyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: jm.enqueueController,
UpdateFunc: func(old, cur interface{}) {
-if job := cur.(*experimental.Job); !isJobFinished(job) {
+if job := cur.(*extensions.Job); !isJobFinished(job) {
jm.enqueueController(job)
}
},
@@ -144,7 +144,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
}

// getPodJob returns the job managing the given pod.
-func (jm *JobController) getPodJob(pod *api.Pod) *experimental.Job {
+func (jm *JobController) getPodJob(pod *api.Pod) *extensions.Job {
jobs, err := jm.jobStore.GetPodJobs(pod)
if err != nil {
glog.V(4).Infof("No jobs found for pod %v, job controller will avoid syncing", pod.Name)
@@ -240,7 +240,7 @@ func (jm *JobController) deletePod(obj interface{}) {
}
}

-// obj could be an *experimental.Job, or a DeletionFinalStateUnknown marker item.
+// obj could be an *extensions.Job, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) enqueueController(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -295,7 +295,7 @@ func (jm *JobController) syncJob(key string) error {
jm.queue.Add(key)
return err
}
-job := *obj.(*experimental.Job)
+job := *obj.(*extensions.Job)
if !jm.podStoreSynced() {
// Sleep so we give the pod reflector goroutine a chance to run.
time.Sleep(replicationcontroller.PodStoreSyncedPollPeriod)
@@ -345,9 +345,9 @@ func (jm *JobController) syncJob(key string) error {
return nil
}

-func newCondition() experimental.JobCondition {
-return experimental.JobCondition{
-Type: experimental.JobComplete,
+func newCondition() extensions.JobCondition {
+return extensions.JobCondition{
+Type: extensions.JobComplete,
Status: api.ConditionTrue,
LastProbeTime: unversioned.Now(),
LastTransitionTime: unversioned.Now(),
@@ -360,7 +360,7 @@ func getStatus(pods []api.Pod) (succeeded, failed int) {
return
}

-func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *experimental.Job) int {
+func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *extensions.Job) int {
var activeLock sync.Mutex
active := len(activePods)
parallelism := *job.Spec.Parallelism
@@ -430,7 +430,7 @@ func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int, job *ex
return active
}

-func (jm *JobController) updateJobStatus(job *experimental.Job) error {
+func (jm *JobController) updateJobStatus(job *extensions.Job) error {
_, err := jm.kubeClient.Experimental().Jobs(job.Namespace).UpdateStatus(job)
return err
}
@@ -446,9 +446,9 @@ func filterPods(pods []api.Pod, phase api.PodPhase) int {
return result
}

-func isJobFinished(j *experimental.Job) bool {
+func isJobFinished(j *extensions.Job) bool {
for _, c := range j.Status.Conditions {
-if c.Type == experimental.JobComplete && c.Status == api.ConditionTrue {
+if c.Type == extensions.JobComplete && c.Status == api.ConditionTrue {
return true
}
}
@@ -456,7 +456,7 @@ func isJobFinished(j *experimental.Job) bool {
}

// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
-type byCreationTimestamp []experimental.Job
+type byCreationTimestamp []extensions.Job

func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

@@ -34,13 +34,13 @@ import (

var alwaysReady = func() bool { return true }

-func newJob(parallelism, completions int) *experimental.Job {
-return &experimental.Job{
+func newJob(parallelism, completions int) *extensions.Job {
+return &extensions.Job{
ObjectMeta: api.ObjectMeta{
Name: "foobar",
Namespace: api.NamespaceDefault,
},
-Spec: experimental.JobSpec{
+Spec: extensions.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
Selector: map[string]string{"foo": "bar"},
@@ -60,7 +60,7 @@ func newJob(parallelism, completions int) *experimental.Job {
}
}

-func getKey(job *experimental.Job, t *testing.T) string {
+func getKey(job *extensions.Job, t *testing.T) string {
if key, err := controller.KeyFunc(job); err != nil {
t.Errorf("Unexpected error getting key for job %v: %v", job.Name, err)
return ""
@@ -70,7 +70,7 @@ func getKey(job *experimental.Job, t *testing.T) string {
}

// create count pods with the given phase for the given job
-func newPodList(count int, status api.PodPhase, job *experimental.Job) []api.Pod {
+func newPodList(count int, status api.PodPhase, job *extensions.Job) []api.Pod {
pods := []api.Pod{}
for i := 0; i < count; i++ {
newPod := api.Pod{
@@ -165,8 +165,8 @@ func TestControllerSyncJob(t *testing.T) {
fakePodControl := controller.FakePodControl{Err: tc.podControllerError}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
-var actual *experimental.Job
-manager.updateHandler = func(job *experimental.Job) error {
+var actual *extensions.Job
+manager.updateHandler = func(job *extensions.Job) error {
actual = job
return nil
}
@@ -211,7 +211,7 @@ func TestControllerSyncJob(t *testing.T) {
if tc.expectedComplete {
completed := false
for _, v := range actual.Status.Conditions {
-if v.Type == experimental.JobComplete && v.Status == api.ConditionTrue {
+if v.Type == extensions.JobComplete && v.Status == api.ConditionTrue {
completed = true
break
}
@@ -229,7 +229,7 @@ func TestSyncJobDeleted(t *testing.T) {
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
-manager.updateHandler = func(job *experimental.Job) error { return nil }
+manager.updateHandler = func(job *extensions.Job) error { return nil }
job := newJob(2, 2)
err := manager.syncJob(getKey(job, t))
if err != nil {
@@ -249,7 +249,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
-manager.updateHandler = func(job *experimental.Job) error { return fmt.Errorf("Fake error") }
+manager.updateHandler = func(job *extensions.Job) error { return fmt.Errorf("Fake error") }
job := newJob(2, 2)
manager.jobStore.Store.Add(job)
err := manager.syncJob(getKey(job, t))
@@ -269,14 +269,14 @@ func TestJobPodLookup(t *testing.T) {
manager := NewJobController(client, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
testCases := []struct {
-job *experimental.Job
+job *extensions.Job
pod *api.Pod

expectedName string
}{
// pods without labels don't match any job
{
-job: &experimental.Job{
+job: &extensions.Job{
ObjectMeta: api.ObjectMeta{Name: "basic"},
},
pod: &api.Pod{
@@ -286,9 +286,9 @@ func TestJobPodLookup(t *testing.T) {
},
// matching labels, different namespace
{
-job: &experimental.Job{
+job: &extensions.Job{
ObjectMeta: api.ObjectMeta{Name: "foo"},
-Spec: experimental.JobSpec{
+Spec: extensions.JobSpec{
Selector: map[string]string{"foo": "bar"},
},
},
@@ -303,9 +303,9 @@ func TestJobPodLookup(t *testing.T) {
},
// matching ns and labels returns
{
-job: &experimental.Job{
+job: &extensions.Job{
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
-Spec: experimental.JobSpec{
+Spec: extensions.JobSpec{
Selector: map[string]string{"foo": "bar"},
},
},
@@ -350,7 +350,7 @@ func TestSyncJobExpectations(t *testing.T) {
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
-manager.updateHandler = func(job *experimental.Job) error { return nil }
+manager.updateHandler = func(job *extensions.Job) error { return nil }

job := newJob(2, 2)
manager.jobStore.Store.Add(job)
@@ -386,7 +386,7 @@ func TestWatchJobs(t *testing.T) {
manager := NewJobController(client, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady

-var testJob experimental.Job
+var testJob extensions.Job
received := make(chan struct{})

// The update sent through the fakeWatcher should make its way into the workqueue,
@@ -397,7 +397,7 @@ func TestWatchJobs(t *testing.T) {
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
-job := *obj.(*experimental.Job)
+job := *obj.(*extensions.Job)
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, job)
}
@@ -419,10 +419,10 @@ func TestWatchJobs(t *testing.T) {
}

func TestIsJobFinished(t *testing.T) {
-job := &experimental.Job{
-Status: experimental.JobStatus{
-Conditions: []experimental.JobCondition{{
-Type: experimental.JobComplete,
+job := &extensions.Job{
+Status: extensions.JobStatus{
+Conditions: []extensions.JobCondition{{
+Type: extensions.JobComplete,
Status: api.ConditionTrue,
}},
},
@@ -462,7 +462,7 @@ func TestWatchPods(t *testing.T) {
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
-job := obj.(*experimental.Job)
+job := obj.(*extensions.Job)
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
}

@@ -29,7 +29,7 @@ const (
// A PV created specifically for one claim must contain this annotation in order to bind to the claim.
// The value must be the namespace and name of the claim being bound to (i.e, claim.Namespace/claim.Name)
// This is an experimental feature and likely to change in the future.
-createdForKey = "volume.experimental.kubernetes.io/provisioned-for"
+createdForKey = "volume.extensions.kubernetes.io/provisioned-for"
)

// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity.

@@ -68,7 +68,7 @@ func (a *HorizontalController) Run(syncPeriod time.Duration) {
}, syncPeriod, util.NeverStop)
}

-func (a *HorizontalController) reconcileAutoscaler(hpa experimental.HorizontalPodAutoscaler) error {
+func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Namespace, hpa.Spec.ScaleRef.Name)

scale, err := a.client.Experimental().Scales(hpa.Spec.ScaleRef.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
@@ -137,7 +137,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa experimental.HorizontalPo
desiredReplicas = currentReplicas
}

-hpa.Status = experimental.HorizontalPodAutoscalerStatus{
+hpa.Status = extensions.HorizontalPodAutoscalerStatus{
CurrentReplicas: currentReplicas,
DesiredReplicas: desiredReplicas,
CurrentConsumption: currentConsumption,

@@ -77,16 +77,16 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {

fakeClient := &testclient.Fake{}
fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
-obj := &experimental.HorizontalPodAutoscalerList{
-Items: []experimental.HorizontalPodAutoscaler{
+obj := &extensions.HorizontalPodAutoscalerList{
+Items: []extensions.HorizontalPodAutoscaler{
{
ObjectMeta: api.ObjectMeta{
Name: hpaName,
Namespace: namespace,
SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
},
-Spec: experimental.HorizontalPodAutoscalerSpec{
-ScaleRef: &experimental.SubresourceReference{
+Spec: extensions.HorizontalPodAutoscalerSpec{
+ScaleRef: &extensions.SubresourceReference{
Kind: "replicationController",
Name: rcName,
Namespace: namespace,
@@ -94,7 +94,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
},
MinReplicas: tc.minReplicas,
MaxReplicas: tc.maxReplicas,
-Target: experimental.ResourceConsumption{Resource: tc.targetResource, Quantity: tc.targetLevel},
+Target: extensions.ResourceConsumption{Resource: tc.targetResource, Quantity: tc.targetLevel},
},
},
},
@@ -103,15 +103,15 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
})

fakeClient.AddReactor("get", "replicationController", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
-obj := &experimental.Scale{
+obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{
Name: rcName,
Namespace: namespace,
},
-Spec: experimental.ScaleSpec{
+Spec: extensions.ScaleSpec{
Replicas: tc.initialReplicas,
},
-Status: experimental.ScaleStatus{
+Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas,
Selector: map[string]string{"name": podNamePrefix},
},
@@ -155,15 +155,15 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
})

fakeClient.AddReactor("update", "replicationController", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
-obj := action.(testclient.UpdateAction).GetObject().(*experimental.Scale)
-replicas := action.(testclient.UpdateAction).GetObject().(*experimental.Scale).Spec.Replicas
+obj := action.(testclient.UpdateAction).GetObject().(*extensions.Scale)
+replicas := action.(testclient.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
assert.Equal(t, tc.desiredReplicas, replicas)
tc.scaleUpdated = true
return true, obj, nil
})

fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
-obj := action.(testclient.UpdateAction).GetObject().(*experimental.HorizontalPodAutoscaler)
+obj := action.(testclient.UpdateAction).GetObject().(*extensions.HorizontalPodAutoscaler)
assert.Equal(t, namespace, obj.Namespace)
assert.Equal(t, hpaName, obj.Name)
assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas)

@@ -47,12 +47,12 @@ type MetricsClient interface {

type ResourceConsumptionClient interface {
// Gets average resource consumption for pods under the given selector.
-Get(resourceName api.ResourceName, selector map[string]string) (*experimental.ResourceConsumption, error)
+Get(resourceName api.ResourceName, selector map[string]string) (*extensions.ResourceConsumption, error)
}

// Aggregates results into ResourceConsumption. Also returns number of
// pods included in the aggregation.
-type metricAggregator func(heapster.MetricResultList) (experimental.ResourceConsumption, int)
+type metricAggregator func(heapster.MetricResultList) (extensions.ResourceConsumption, int)

type metricDefinition struct {
name string
@@ -76,23 +76,23 @@ func NewHeapsterMetricsClient(client client.Interface) *HeapsterMetricsClient {

var heapsterMetricDefinitions = map[api.ResourceName]metricDefinition{
api.ResourceCPU: {"cpu-usage",
-func(metrics heapster.MetricResultList) (experimental.ResourceConsumption, int) {
+func(metrics heapster.MetricResultList) (extensions.ResourceConsumption, int) {
sum, count := calculateSumFromLatestSample(metrics)
value := "0"
if count > 0 {
// assumes that cpu usage is in millis
value = fmt.Sprintf("%dm", sum/uint64(count))
}
-return experimental.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count
+return extensions.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count
}},
api.ResourceMemory: {"memory-usage",
-func(metrics heapster.MetricResultList) (experimental.ResourceConsumption, int) {
+func(metrics heapster.MetricResultList) (extensions.ResourceConsumption, int) {
sum, count := calculateSumFromLatestSample(metrics)
value := int64(0)
if count > 0 {
value = int64(sum) / int64(count)
}
-return experimental.ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count
+return extensions.ResourceConsumption{Resource: api.ResourceMemory, Quantity: *resource.NewQuantity(value, resource.DecimalSI)}, count
}},
}

@@ -104,7 +104,7 @@ func (h *HeapsterMetricsClient) ResourceConsumption(namespace string) ResourceCo
}
}

-func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, selector map[string]string) (*experimental.ResourceConsumption, error) {
+func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, selector map[string]string) (*extensions.ResourceConsumption, error) {
podList, err := h.client.Pods(h.namespace).
List(labels.SelectorFromSet(labels.Set(selector)), fields.Everything())

@@ -118,7 +118,7 @@ func (h *HeapsterResourceConsumptionClient) Get(resourceName api.ResourceName, s
return h.getForPods(resourceName, podNames)
}

-func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*experimental.ResourceConsumption, error) {
+func (h *HeapsterResourceConsumptionClient) getForPods(resourceName api.ResourceName, podNames []string) (*extensions.ResourceConsumption, error) {
metricSpec, metricDefined := h.resourceDefinitions[resourceName]
if !metricDefined {
return nil, fmt.Errorf("heapster metric not defined for %v", resourceName)

@@ -122,7 +122,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *testclient.Fake {
return fakeClient
}

-func (tc *testCase) verifyResults(t *testing.T, val *experimental.ResourceConsumption, err error) {
+func (tc *testCase) verifyResults(t *testing.T, val *extensions.ResourceConsumption, err error) {
assert.Equal(t, tc.desiredError, err)
if tc.desiredError != nil {
return