Fix HPA E2E CRD test

@@ -17,6 +17,7 @@ limitations under the License.
 package resource
 
 import (
+    "context"
     "fmt"
     "time"
 
@@ -26,8 +27,10 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/fields"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/client-go/dynamic"
     clientset "k8s.io/client-go/kubernetes"
+    scaleclient "k8s.io/client-go/scale"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -73,6 +76,39 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
         }
         return err
     }
+    deleteObject := func() error {
+        background := metav1.DeletePropagationBackground
+        return testutils.DeleteResource(c, kind, ns, name, metav1.DeleteOptions{PropagationPolicy: &background})
+    }
+    return deleteObjectAndWaitForGC(c, rtObject, deleteObject, ns, name, kind.String())
+}
+
+// DeleteCustomResourceAndWaitForGC deletes only the given resource and waits for GC to delete the pods.
+// It allows providing a custom resource client, e.g. to fetch a CRD object.
+func DeleteCustomResourceAndWaitForGC(c clientset.Interface, dynamicClient dynamic.Interface, scaleClient scaleclient.ScalesGetter, gvr schema.GroupVersionResource, ns, name string) error {
+    ginkgo.By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", gvr, name, ns))
+    resourceClient := dynamicClient.Resource(gvr).Namespace(ns)
+    _, err := resourceClient.Get(context.TODO(), name, metav1.GetOptions{})
+    if err != nil {
+        if apierrors.IsNotFound(err) {
+            framework.Logf("%v %s not found: %v", gvr, name, err)
+            return nil
+        }
+        return err
+    }
+    scaleObj, err := scaleClient.Scales(ns).Get(context.TODO(), gvr.GroupResource(), name, metav1.GetOptions{})
+    if err != nil {
+        framework.Logf("error while trying to get scale subresource of kind %v with name %v: %v", gvr, name, err)
+        return nil
+    }
+    deleteObject := func() error {
+        background := metav1.DeletePropagationBackground
+        return resourceClient.Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &background})
+    }
+    return deleteObjectAndWaitForGC(c, scaleObj, deleteObject, ns, name, gvr.String())
+}
+
+func deleteObjectAndWaitForGC(c clientset.Interface, rtObject runtime.Object, deleteObject func() error, ns, name, description string) error {
     selector, err := GetSelectorFromRuntimeObject(rtObject)
     if err != nil {
         return err
@@ -88,14 +124,18 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
     }
 
     defer ps.Stop()
-    falseVar := false
-    deleteOption := metav1.DeleteOptions{OrphanDependents: &falseVar}
     startTime := time.Now()
-    if err := testutils.DeleteResourceWithRetries(c, kind, ns, name, deleteOption); err != nil {
+    if err := testutils.RetryWithExponentialBackOff(func() (bool, error) {
+        err := deleteObject()
+        if err == nil || apierrors.IsNotFound(err) {
+            return true, nil
+        }
+        return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
+    }); err != nil {
         return err
     }
     deleteTime := time.Since(startTime)
-    framework.Logf("Deleting %v %s took: %v", kind, name, deleteTime)
+    framework.Logf("Deleting %v %s took: %v", description, name, deleteTime)
 
     var interval, timeout time.Duration
     switch {
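
The old single call to testutils.DeleteResourceWithRetries is replaced by a deleteObject closure driven by testutils.RetryWithExponentialBackOff. Below is a minimal sketch of that retry pattern, assuming it is built on wait.ExponentialBackoff from the already-imported util/wait package; the helper name and backoff values are illustrative, not the ones testutils actually uses.

package sketch

import (
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// retryDelete is a hypothetical stand-in for testutils.RetryWithExponentialBackOff:
// it keeps invoking the condition until it returns (true, nil), retries on
// (false, nil), and stops immediately when the condition reports a non-nil
// (non-retriable) error.
func retryDelete(condition func() (bool, error)) error {
    backoff := wait.Backoff{
        Duration: 5 * time.Second, // illustrative values only
        Factor:   2.0,
        Steps:    5,
    }
    return wait.ExponentialBackoff(backoff, condition)
}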
@@ -119,7 +159,7 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
         return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
     }
     terminatePodTime := time.Since(startTime) - deleteTime
-    framework.Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
+    framework.Logf("Terminating %v %s pods took: %v", description, name, terminatePodTime)
 
     // In gce, at any point, small percentage of nodes can disappear for
     // ~10 minutes due to hostError. 20 minutes should be long enough to
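
As a usage sketch, an HPA CRD e2e test could clean up its custom-resource scale target with the new helper roughly like this. The DeleteCustomResourceAndWaitForGC signature matches the hunk above; the package path is assumed to be k8s.io/kubernetes/test/e2e/framework/resource, and the helper name, import alias, and example.com GVR are made up for illustration.

package sketch

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/client-go/dynamic"
    clientset "k8s.io/client-go/kubernetes"
    scaleclient "k8s.io/client-go/scale"

    e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
)

// cleanupCRDScaleTarget deletes a hypothetical CRD-backed scale target and waits
// for the garbage collector to remove the pods selected via its /scale subresource.
func cleanupCRDScaleTarget(c clientset.Interface, dc dynamic.Interface, sc scaleclient.ScalesGetter, ns, name string) error {
    // Illustrative GVR; a real test would use its own CRD's group/version/resource.
    gvr := schema.GroupVersionResource{Group: "example.com", Version: "v1", Resource: "widgets"}
    return e2eresource.DeleteCustomResourceAndWaitForGC(c, dc, sc, gvr, ns, name)
}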

@@ -21,6 +21,7 @@ import (
     "fmt"
 
     appsv1 "k8s.io/api/apps/v1"
+    autoscalingv1 "k8s.io/api/autoscaling/v1"
     batchv1 "k8s.io/api/batch/v1"
     v1 "k8s.io/api/core/v1"
     extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
@@ -79,6 +80,12 @@ func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
         return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
     case *batchv1.Job:
         return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
+    case *autoscalingv1.Scale:
+        selector, err := metav1.ParseToLabelSelector(typed.Status.Selector)
+        if err != nil {
+            return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %v", obj, err)
+        }
+        return metav1.LabelSelectorAsSelector(selector)
     default:
         return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
     }
@@ -124,6 +131,8 @@ func GetReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
             return *typed.Spec.Parallelism, nil
         }
         return 0, nil
+    case *autoscalingv1.Scale:
+        return typed.Status.Replicas, nil
     default:
         return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
     }
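
The new *autoscalingv1.Scale cases let these helpers derive the pod selector and replica count from a /scale subresource rather than from a typed workload object. A small self-contained sketch of the same two-step selector conversion; the selector string and replica count are made up for illustration.

package main

import (
    "fmt"

    autoscalingv1 "k8s.io/api/autoscaling/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // A /scale subresource reports its pod selector as a string in Status.Selector.
    scale := &autoscalingv1.Scale{
        Status: autoscalingv1.ScaleStatus{Replicas: 3, Selector: "app=crd-target"},
    }

    // Same conversion as the new case above: string -> *metav1.LabelSelector -> labels.Selector.
    ls, err := metav1.ParseToLabelSelector(scale.Status.Selector)
    if err != nil {
        panic(err)
    }
    selector, err := metav1.LabelSelectorAsSelector(ls)
    if err != nil {
        panic(err)
    }
    fmt.Println(selector.String(), scale.Status.Replicas) // prints: app=crd-target 3
}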