@@ -63,7 +63,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
-	testutil "k8s.io/kubernetes/test/utils"
 
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
@@ -99,7 +98,7 @@ import (
 	utilversion "k8s.io/kubernetes/pkg/util/version"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
-	testutils "k8s.io/kubernetes/test/utils"
+	testutil "k8s.io/kubernetes/test/utils"
 )
 
 const (
@@ -585,7 +584,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
 			if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
 				continue
 			}
-			res, err := testutils.PodRunningReady(&pod)
+			res, err := testutil.PodRunningReady(&pod)
 			switch {
 			case res && err == nil:
 				nOk++
@@ -653,7 +652,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
 	}
 	logFunc("Running kubectl logs on non-ready containers in %v", ns)
 	for _, pod := range podList.Items {
-		if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
+		if res, err := testutil.PodRunningReady(&pod); !res || err != nil {
 			kubectlLogPod(c, pod, "", Logf)
 		}
 	}
@@ -776,7 +775,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
 		}
 		Logf("Waiting for pod %[1]s in namespace '%[2]s' status to be '%[3]s'"+
 			"(found phase: %[4]q, readiness: %[5]t) (%[6]v elapsed)",
-			podName, ns, desc, pod.Status.Phase, testutils.PodReady(pod), time.Since(start))
+			podName, ns, desc, pod.Status.Phase, testutil.PodReady(pod), time.Since(start))
 	}
 	return fmt.Errorf("gave up waiting for pod '%s' to be '%s' after %v", podName, desc, timeout)
 }
@@ -2280,25 +2279,25 @@ func (f *Framework) MatchContainerOutput(
 	return nil
 }
 
-func RunDeployment(config testutils.DeploymentConfig) error {
+func RunDeployment(config testutil.DeploymentConfig) error {
 	By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
 	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunDeployment(config)
+	return testutil.RunDeployment(config)
 }
 
-func RunReplicaSet(config testutils.ReplicaSetConfig) error {
+func RunReplicaSet(config testutil.ReplicaSetConfig) error {
 	By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
 	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunReplicaSet(config)
+	return testutil.RunReplicaSet(config)
 }
 
-func RunRC(config testutils.RCConfig) error {
+func RunRC(config testutil.RCConfig) error {
 	By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
 	config.NodeDumpFunc = DumpNodeDebugInfo
 	config.ContainerDumpFunc = LogFailedContainers
-	return testutils.RunRC(config)
+	return testutil.RunRC(config)
 }
 
 type EventsLister func(opts metav1.ListOptions, ns string) (*v1.EventList, error)
@@ -2572,7 +2571,7 @@ func GetTTLAnnotationFromNode(node *v1.Node) (time.Duration, bool) {
 }
 
 func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, labelValue string) {
-	ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
+	ExpectNoError(testutil.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
 }
 
 func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
@@ -2595,10 +2594,10 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Tai
 // won't fail if target label doesn't exist or has been removed.
 func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string) {
 	By("removing the label " + labelKey + " off the node " + nodeName)
-	ExpectNoError(testutils.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
+	ExpectNoError(testutil.RemoveLabelOffNode(c, nodeName, []string{labelKey}))
 
 	By("verifying the node doesn't have the label " + labelKey)
-	ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
+	ExpectNoError(testutil.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
 }
 
 func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Taint) {
@@ -2719,7 +2718,7 @@ func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind s
 	if err != nil {
 		return err
 	}
-	err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
+	err = testutil.WaitForPodsWithLabelRunning(c, ns, selector)
 	if err != nil {
 		return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
 	}
@@ -2736,7 +2735,7 @@ func ScaleDeployment(clientset clientset.Interface, internalClientset internalcl
 
 // Returns true if all the specified pods are scheduled, else returns false.
 func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (bool, error) {
-	PodStore := testutils.NewPodStore(c, ns, label, fields.Everything())
+	PodStore := testutil.NewPodStore(c, ns, label, fields.Everything())
 	defer PodStore.Stop()
 	pods := PodStore.List()
 	if len(pods) == 0 {
@@ -2798,7 +2797,7 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
 		}
 		current = 0
 		for _, pod := range pods.Items {
-			if flag, err := testutils.PodRunningReady(&pod); err == nil && flag == true {
+			if flag, err := testutil.PodRunningReady(&pod); err == nil && flag == true {
 				current++
 			}
 		}
@@ -3032,8 +3031,8 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
 
 // podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
 // It waits until the reflector does a List() before returning.
-func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutils.PodStore, error) {
-	ps := testutils.NewPodStore(c, ns, selector, fields.Everything())
+func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutil.PodStore, error) {
+	ps := testutil.NewPodStore(c, ns, selector, fields.Everything())
 	err := wait.Poll(100*time.Millisecond, 2*time.Minute, func() (bool, error) {
 		if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
 			return true, nil
@@ -3047,7 +3046,7 @@ func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selec
 // This is to make a fair comparison of deletion time between DeleteRCAndPods
 // and DeleteRCAndWaitForGC, because the RC controller decreases status.replicas
 // when the pod is inactvie.
-func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration) error {
+func waitForPodsInactive(ps *testutil.PodStore, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
 		pods := ps.List()
 		for _, pod := range pods {
@@ -3060,7 +3059,7 @@ func waitForPodsInactive(ps *testutils.PodStore, interval, timeout time.Duration
 }
 
 // waitForPodsGone waits until there are no pods left in the PodStore.
-func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) error {
+func waitForPodsGone(ps *testutil.PodStore, interval, timeout time.Duration) error {
 	return wait.PollImmediate(interval, timeout, func() (bool, error) {
 		if pods := ps.List(); len(pods) == 0 {
 			return true, nil
@@ -3243,25 +3242,16 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) er
 		totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
 		maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
 		if totalCreated > maxCreated {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, allRSs)
 			return false, fmt.Errorf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
 		}
 		minAvailable := deploymentutil.MinAvailable(deployment)
 		if deployment.Status.AvailableReplicas < minAvailable {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, allRSs)
 			return false, fmt.Errorf("total pods available: %d, less than the min required: %d", deployment.Status.AvailableReplicas, minAvailable)
 		}
 
 		// When the deployment status and its underlying resources reach the desired state, we're done
 		return deploymentutil.DeploymentComplete(deployment, &deployment.Status), nil
 	})
-
-	if err == wait.ErrWaitTimeout {
-		logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-		logPodsOfDeployment(c, deployment, allRSs)
-	}
 	if err != nil {
 		return fmt.Errorf("error waiting for deployment %q status to match expectation: %v", d.Name, err)
 	}
@@ -3325,13 +3315,6 @@ func WatchRecreateDeployment(c clientset.Interface, d *extensions.Deployment) er
 		status = d.Status
 
 		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
-			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c)
-			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c)
-			if err == nil && nerr == nil {
-				Logf("%+v", d)
-				logReplicaSetsOfDeployment(d, allOldRSs, newRS)
-				logPodsOfDeployment(c, d, append(allOldRSs, newRS))
-			}
 			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
 		}
 
@@ -3410,15 +3393,10 @@ func WaitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
 	})
 	if pollErr == wait.ErrWaitTimeout {
 		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
-		logReplicaSetsOfDeployment(d, oldRSs, nil)
 	}
 	return pollErr
 }
 
-func logReplicaSetsOfDeployment(deployment *extensions.Deployment, allOldRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet) {
-	testutil.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, Logf)
-}
-
 func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
 	return deploymentutil.WaitForObservedDeployment(func() (*extensions.Deployment, error) {
 		return c.Extensions().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
@@ -3438,19 +3416,10 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
 	})
 	if pollErr == wait.ErrWaitTimeout {
 		pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason: %v", deployment.Name, deployment.Status.Conditions)
-		_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c)
-		if err == nil {
-			logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
-			logPodsOfDeployment(c, deployment, append(allOldRSs, newRS))
-		}
 	}
 	return pollErr
 }
 
-func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) {
-	testutil.LogPodsOfDeployment(c, deployment, rsList, Logf)
-}
-
 // Waits for the number of events on the given object to reach a desired count.
 func WaitForEvents(c clientset.Interface, ns string, objOrRef runtime.Object, desiredEventsCount int) error {
 	return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
@@ -3905,14 +3874,14 @@ func GetSigner(provider string) (ssh.Signer, error) {
 // podNames in namespace ns are running and ready, using c and waiting at most
 // timeout.
 func CheckPodsRunningReady(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
-	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReady, "running and ready")
+	return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReady, "running and ready")
 }
 
 // CheckPodsRunningReadyOrSucceeded returns whether all pods whose names are
 // listed in podNames in namespace ns are running and ready, or succeeded; use
 // c and waiting at most timeout.
 func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames []string, timeout time.Duration) bool {
-	return CheckPodsCondition(c, ns, podNames, timeout, testutils.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
+	return CheckPodsCondition(c, ns, podNames, timeout, testutil.PodRunningReadyOrSucceeded, "running and ready, or succeeded")
 }
 
 // CheckPodsCondition returns whether all pods whose names are listed in podNames
@@ -4603,7 +4572,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
 			return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
 		}
 	} else {
-		if err := testutils.WaitForPodsWithLabelRunning(
+		if err := testutil.WaitForPodsWithLabelRunning(
 			clientset, ns, labels.SelectorFromSet(labels.Set(rc.Spec.Selector))); err != nil {
 			return err
 		}
@@ -5097,22 +5066,22 @@ func ListNamespaceEvents(c clientset.Interface, ns string) error {
 	return nil
 }
 
-// E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
+// E2ETestNodePreparer implements testutil.TestNodePreparer interface, which is used
 // to create/modify Nodes before running a test.
 type E2ETestNodePreparer struct {
 	client clientset.Interface
 	// Specifies how many nodes should be modified using the given strategy.
 	// Only one strategy can be applied to a single Node, so there needs to
 	// be at least <sum_of_keys> Nodes in the cluster.
-	countToStrategy       []testutils.CountToStrategy
-	nodeToAppliedStrategy map[string]testutils.PrepareNodeStrategy
+	countToStrategy       []testutil.CountToStrategy
+	nodeToAppliedStrategy map[string]testutil.PrepareNodeStrategy
 }
 
-func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutils.CountToStrategy) testutils.TestNodePreparer {
+func NewE2ETestNodePreparer(client clientset.Interface, countToStrategy []testutil.CountToStrategy) testutil.TestNodePreparer {
 	return &E2ETestNodePreparer{
 		client:                client,
 		countToStrategy:       countToStrategy,
-		nodeToAppliedStrategy: make(map[string]testutils.PrepareNodeStrategy),
+		nodeToAppliedStrategy: make(map[string]testutil.PrepareNodeStrategy),
 	}
 }
 
@@ -5130,7 +5099,7 @@ func (p *E2ETestNodePreparer) PrepareNodes() error {
 	for _, v := range p.countToStrategy {
 		sum += v.Count
 		for ; index < sum; index++ {
-			if err := testutils.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
+			if err := testutil.DoPrepareNode(p.client, &nodes.Items[index], v.Strategy); err != nil {
 				glog.Errorf("Aborting node preparation: %v", err)
 				return err
 			}
@@ -5148,7 +5117,7 @@ func (p *E2ETestNodePreparer) CleanupNodes() error {
 		name := nodes.Items[i].Name
 		strategy, found := p.nodeToAppliedStrategy[name]
 		if found {
-			if err = testutils.DoCleanupNode(p.client, name, strategy); err != nil {
+			if err = testutil.DoCleanupNode(p.client, name, strategy); err != nil {
 				glog.Errorf("Skipping cleanup of Node: failed update of %v: %v", name, err)
 				encounteredError = err
 			}