Merge pull request #126067 from tenzen-y/implement-job-success-policy-e2e
Graduate the JobSuccessPolicy to Beta
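The tests below exercise the JobSuccessPolicy API as it graduates to beta: an indexed Job can declare itself successful once a listed set of indexes or a count of indexes has succeeded, at which point the controller adds a SuccessCriteriaMet condition (reason SuccessPolicy) followed by Complete. As an illustrative sketch only (values and variable names are not part of this change; batchv1 is k8s.io/api/batch/v1 and ptr is k8s.io/utils/ptr), the spec shape under test looks roughly like:

	// Sketch: an indexed Job treated as succeeded once index 0 completes,
	// mirroring the objects the new e2e cases construct below.
	job := &batchv1.Job{
		Spec: batchv1.JobSpec{
			Parallelism: ptr.To[int32](2),
			Completions: ptr.To[int32](5),
			CompletionMode: ptr.To(batchv1.IndexedCompletion),
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{{
					SucceededIndexes: ptr.To("0"),
				}},
			},
		},
	}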
@@ -20,6 +20,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"math"
 	"strconv"
 
 	batchv1 "k8s.io/api/batch/v1"
@@ -81,7 +82,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring pods for job exist")
@@ -173,7 +174,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
 
@@ -253,7 +254,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to await for the pod to be deleted: %s/%s", pod.Name, pod.Namespace)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
 
@@ -342,7 +343,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to await for the pod to be deleted: %s/%s", pod.Name, pod.Namespace)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
 
@@ -374,7 +375,7 @@ var _ = SIGDescribe("Job", func() {
 		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Waiting for job to complete")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
 
@@ -499,7 +500,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create indexed job in namespace %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring pods with index for job exist")
@@ -520,6 +521,123 @@ done`}
 		gomega.Expect(gotIndexes).To(gomega.Equal(wantIndexes), "expected completed indexes %s, but got %s", wantIndexes, gotIndexes)
 	})
 
+	/*
+		Testcase: Ensure that job with successPolicy succeeded when all indexes succeeded
+		Description: Create an indexed job with successPolicy.
+		Verify that job got SuccessCriteriaMet with SuccessPolicy reason and Complete condition
+		once all indexes succeeded.
+	*/
+	ginkgo.It("with successPolicy should succeeded when all indexes succeeded", func(ctx context.Context) {
+		parallelism := int32(2)
+		completions := int32(2)
+		backoffLimit := int32(6) // default value
+
+		ginkgo.By("Creating an indexed job with successPolicy")
+		job := e2ejob.NewTestJob("succeeded", "with-success-policy-all-index-succeeded", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+		job.Spec.CompletionMode = ptr.To(batchv1.IndexedCompletion)
+		job.Spec.SuccessPolicy = &batchv1.SuccessPolicy{
+			Rules: []batchv1.SuccessPolicyRule{{
+				SucceededCount: ptr.To[int32](2),
+			}},
+		}
+		job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Awaiting for the job to have the interim SuccessCriteriaMet with SuccessPolicy reason condition")
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, ptr.To(batchv1.JobReasonSuccessPolicy))
+		framework.ExpectNoError(err, "failed to ensure that job has SuccessCriteriaMet with SuccessPolicy reason condition")
+
+		ginkgo.By("Ensure that the job reaches completions")
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonSuccessPolicy), completions)
+		framework.ExpectNoError(err, "failed to ensure that job completed")
+
+		ginkgo.By("Verifying that the job status to ensure correct final state")
+		job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to get latest job object")
+		gomega.Expect(job.Status.Active).Should(gomega.Equal(int32(0)))
+		gomega.Expect(job.Status.Ready).Should(gomega.Equal(ptr.To[int32](0)))
+		gomega.Expect(job.Status.Terminating).Should(gomega.Equal(ptr.To[int32](0)))
+		gomega.Expect(job.Status.Failed).Should(gomega.Equal(int32(0)))
+	})
+
+	/*
+		Testcase: Ensure that job with successPolicy succeededIndexes rule succeeded even when some indexes remain pending
+		Description: Create an indexed job with successPolicy succeededIndexes rule.
+		Verify that the job got SuccessCriteriaMet with SuccessPolicy reason condition and Complete condition
+		when the job met successPolicy even if some indexed remain pending.
+	*/
+	ginkgo.It("with successPolicy succeededIndexes rule should succeeded even when some indexes remain pending", func(ctx context.Context) {
+		parallelism := int32(2)
+		completions := int32(5)
+		backoffLimit := int32(6) // default value
+
+		ginkgo.By("Creating an indexed job with successPolicy succeededIndexes rule")
+		job := e2ejob.NewTestJob("failOddSucceedEven", "with-success-policy-succeeded-indexed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+		job.Spec.CompletionMode = ptr.To(batchv1.IndexedCompletion)
+		job.Spec.SuccessPolicy = &batchv1.SuccessPolicy{
+			Rules: []batchv1.SuccessPolicyRule{{
+				SucceededIndexes: ptr.To("0"),
+			}},
+		}
+		job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Awaiting for the job to have the interim SuccessCriteriaMet with SuccessPolicy reason condition")
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, ptr.To(batchv1.JobReasonSuccessPolicy))
+		framework.ExpectNoError(err, "failed to ensure that job has SuccessCriteriaMet with SuccessPolicy reason condition")
+
+		ginkgo.By("Ensure that the job reaches completions")
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonSuccessPolicy), 1)
+		framework.ExpectNoError(err, "failed to ensure that job completed")
+
+		ginkgo.By("Verifying that the only appropriately index succeeded")
+		job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to get the latest job object")
+		gomega.Expect(job.Status.CompletedIndexes).Should(gomega.Equal("0"))
+		gomega.Expect(job.Status.Active).Should(gomega.Equal(int32(0)))
+		gomega.Expect(job.Status.Ready).Should(gomega.Equal(ptr.To[int32](0)))
+		gomega.Expect(job.Status.Terminating).Should(gomega.Equal(ptr.To[int32](0)))
+	})
+
+	/*
+		Testcase: Ensure that job with successPolicy succeededCount rule succeeded even when some indexes remain pending
+		Description: Create an indexed job with successPolicy succeededCount rule.
+		Verify that the job got the SuccessCriteriaMet with SuccessPolicy reason condition and Complete condition
+		when the job met successPolicy even if some indexed remain pending.
+	*/
+	ginkgo.It("with successPolicy succeededCount rule should succeeded even when some indexes remain pending", func(ctx context.Context) {
+		parallelism := int32(2)
+		completions := int32(5)
+		backoffLimit := int32(math.MaxInt32)
+
+		ginkgo.By("Creating an indexed job with successPolicy succeededCount rule")
+		job := e2ejob.NewTestJob("failOddSucceedEven", "with-success-policy-succeeded-count", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+		job.Spec.CompletionMode = ptr.To(batchv1.IndexedCompletion)
+		job.Spec.SuccessPolicy = &batchv1.SuccessPolicy{
+			Rules: []batchv1.SuccessPolicyRule{{
+				SucceededCount: ptr.To[int32](1),
+			}},
+		}
+		job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Awaiting for the job to have the interim SuccessCriteriaMet condition with SuccessPolicy reason")
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, ptr.To(batchv1.JobReasonSuccessPolicy))
+		framework.ExpectNoError(err, "failed to ensure that the job has SuccessCriteriaMet condition with SuccessPolicy rule")
+
+		ginkgo.By("Ensure that the job reaches completions")
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonSuccessPolicy), 1)
+		framework.ExpectNoError(err, "failed to ensure that job completed")
+
+		ginkgo.By("Verifying that the job status to ensure correct final state")
+		job, err = e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "failed to get the latest job object")
+		gomega.Expect(job.Status.CompletedIndexes).Should(gomega.Equal("0"))
+		gomega.Expect(job.Status.Active).Should(gomega.Equal(int32(0)))
+		gomega.Expect(job.Status.Ready).Should(gomega.Equal(ptr.To[int32](0)))
+		gomega.Expect(job.Status.Terminating).Should(gomega.Equal(ptr.To[int32](0)))
+	})
+
 	/*
 		Testcase: Ensure that all indexes are executed for an indexed job with backoffLimitPerIndex despite some failing
 		Description: Create an indexed job and ensure that all indexes are either failed or succeeded, depending
@@ -683,7 +801,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 	})
 
@@ -710,11 +828,11 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Awaiting for the job to have the interim success condition")
-		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, "")
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, ptr.To(batchv1.JobReasonCompletionsReached))
 		framework.ExpectNoError(err, "failed to ensure job has the interim success condition: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), *job.Spec.Completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Verifying the Job status fields to ensure correct final state")
@@ -737,11 +855,11 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Awaiting for the job to have the interim failure condition")
-		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailureTarget, batchv1.JobReasonDeadlineExceeded)
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailureTarget, ptr.To(batchv1.JobReasonDeadlineExceeded))
 		framework.ExpectNoError(err, "failed to ensure job has the interim failure condition: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job past active deadline")
-		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailed, batchv1.JobReasonDeadlineExceeded)
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailed, ptr.To(batchv1.JobReasonDeadlineExceeded))
 		framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Verifying the Job status fields to ensure correct final state")
@@ -852,11 +970,11 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Awaiting for the job to have the interim failure condition")
-		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailureTarget, batchv1.JobReasonBackoffLimitExceeded)
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailureTarget, ptr.To(batchv1.JobReasonBackoffLimitExceeded))
 		framework.ExpectNoError(err, "failed to ensure job has the interim failure condition: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job exceed backofflimit")
-		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailed, batchv1.JobReasonBackoffLimitExceeded)
+		err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailed, ptr.To(batchv1.JobReasonBackoffLimitExceeded))
 		framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
@@ -909,7 +1027,7 @@ done`}
 		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring job reaches completions")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 
 		ginkgo.By("Ensuring pods for job exist")
@@ -1115,7 +1233,7 @@ done`}
 		framework.Logf("Job: %v as labels: %v", testJob.Name, testJob.Labels)
 
 		ginkgo.By("Waiting for job to complete")
-		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, completions)
+		err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, ns, jobName, nil, completions)
 		framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", ns)
 
 		ginkgo.By("Delete a job collection with a labelselector")
@@ -68,7 +68,14 @@ func waitForJobPodsInPhase(ctx context.Context, c clientset.Interface, ns, jobNa
 }
 
 // WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
-func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName string, completions int32) error {
+// This function checks if the number of succeeded Job Pods reached expected completions and
+// the Job has a "Complete" condition with the expected reason.
+// The pointer "reason" argument allows us to skip "Complete" condition reason verifications.
+// The conformance test cases have the different expected "Complete" condition reason ("CompletionsReached" vs "")
+// between conformance CI jobs and e2e CI jobs since the e2e conformance test cases are performed in
+// both conformance CI jobs with GA-only features and e2e CI jobs with all default-enabled features.
+// So, we need to skip "Complete" condition reason verifications in the e2e conformance test cases.
+func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName string, reason *string, completions int32) error {
 	if err := wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, false, func(ctx context.Context) (bool, error) {
 		curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
 		if err != nil {
@@ -78,7 +85,7 @@ func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName
 	}); err != nil {
 		return nil
 	}
-	return WaitForJobCondition(ctx, c, ns, jobName, batchv1.JobComplete, "")
+	return WaitForJobCondition(ctx, c, ns, jobName, batchv1.JobComplete, reason)
 }
 
 // WaitForJobReady waits for particular value of the Job .status.ready field
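For reviewers, a small usage sketch of the widened helper as the e2e cases above call it (ctx, f, job, and completions are assumed to exist in the surrounding test; they are not defined by this hunk):

	// Assert the expected Complete condition reason explicitly:
	err := e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To(batchv1.JobReasonCompletionsReached), completions)
	framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

	// Conformance-tagged cases pass nil so the reason is not verified, because clusters
	// running with GA-only feature gates may still report an empty reason:
	err = e2ejob.WaitForJobComplete(ctx, f.ClientSet, f.Namespace.Name, job.Name, nil, completions)
	framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)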
@@ -115,8 +122,10 @@ func WaitForJobFailed(c clientset.Interface, ns, jobName string) error {
 	})
 }
 
-// waitForJobCondition waits for the specified Job to have the expected condition with the specific reason.
-func WaitForJobCondition(ctx context.Context, c clientset.Interface, ns, jobName string, cType batchv1.JobConditionType, reason string) error {
+// WaitForJobCondition waits for the specified Job to have the expected condition with the specific reason.
+// When the nil reason is passed, the "reason" string in the condition is
+// not checked.
+func WaitForJobCondition(ctx context.Context, c clientset.Interface, ns, jobName string, cType batchv1.JobConditionType, reason *string) error {
 	err := wait.PollUntilContextTimeout(ctx, framework.Poll, JobTimeout, false, func(ctx context.Context) (bool, error) {
 		curr, err := c.BatchV1().Jobs(ns).Get(ctx, jobName, metav1.GetOptions{})
 		if err != nil {
@@ -124,7 +133,7 @@ func WaitForJobCondition(ctx context.Context, c clientset.Interface, ns, jobName
 		}
 		for _, c := range curr.Status.Conditions {
 			if c.Type == cType && c.Status == v1.ConditionTrue {
-				if reason == c.Reason {
+				if reason == nil || *reason == c.Reason {
 					return true, nil
 				}
 			}
@@ -132,7 +141,7 @@ func WaitForJobCondition(ctx context.Context, c clientset.Interface, ns, jobName
 		return false, nil
 	})
 	if err != nil {
-		return fmt.Errorf("waiting for Job %q to have the condition %q with reason: %q: %w", jobName, cType, reason, err)
+		return fmt.Errorf("waiting for Job %q to have the condition %q with reason: %v: %w", jobName, cType, reason, err)
 	}
 	return nil
 }
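Likewise, a brief sketch of the nil-reason semantics documented above for WaitForJobCondition (surrounding variables are again assumed from the tests, not introduced here):

	// Require a specific reason on the interim condition:
	err := e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, ptr.To(batchv1.JobReasonSuccessPolicy))
	framework.ExpectNoError(err, "failed to ensure the interim condition")

	// Pass nil to accept the condition with any reason; only the condition type and
	// its True status are checked by the poll loop:
	err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobFailed, nil)
	framework.ExpectNoError(err, "failed to ensure the Failed condition")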
@@ -389,7 +389,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
 				Value: 1,
 			},
 			wantJobFinishedMetric: metricLabelsWithValue{
-				Labels: []string{"NonIndexed", "succeeded", ""},
+				Labels: []string{"NonIndexed", "succeeded", "CompletionsReached"},
 				Value: 1,
 			},
 		},
@@ -400,7 +400,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
 			wantFailed: 1,
 			wantJobConditionType: batchv1.JobComplete,
 			wantJobFinishedMetric: metricLabelsWithValue{
-				Labels: []string{"NonIndexed", "succeeded", ""},
+				Labels: []string{"NonIndexed", "succeeded", "CompletionsReached"},
 				Value: 1,
 			},
 			wantPodFailuresHandledByPolicyRuleMetric: &metricLabelsWithValue{
@@ -415,7 +415,7 @@ func TestJobPodFailurePolicy(t *testing.T) {
 			wantFailed: 1,
 			wantJobConditionType: batchv1.JobComplete,
 			wantJobFinishedMetric: metricLabelsWithValue{
-				Labels: []string{"NonIndexed", "succeeded", ""},
+				Labels: []string{"NonIndexed", "succeeded", "CompletionsReached"},
 				Value: 1,
 			},
 			wantPodFailuresHandledByPolicyRuleMetric: &metricLabelsWithValue{
@@ -547,7 +547,7 @@ func TestSuccessPolicy(t *testing.T) {
 			wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
 			wantJobFinishedNumMetric: []metricLabelsWithValue{
 				{
-					Labels: []string{"Indexed", "succeeded", ""},
+					Labels: []string{"Indexed", "succeeded", "SuccessPolicy"},
 					Value: 1,
 				},
 			},
@@ -587,6 +587,37 @@ func TestSuccessPolicy(t *testing.T) {
 				},
 			},
 		},
+		"job without successPolicy; incremented the jobs_finished_total metric with CompletionsReached reason": {
+			enableJobSuccessPolicy: true,
+			job: batchv1.Job{
+				Spec: batchv1.JobSpec{
+					Parallelism: ptr.To[int32](1),
+					Completions: ptr.To[int32](1),
+					CompletionMode: completionModePtr(batchv1.IndexedCompletion),
+					Template: podTemplateSpec,
+				},
+			},
+			podTerminations: []podTerminationWithExpectations{
+				{
+					index: 0,
+					status: v1.PodStatus{
+						Phase: v1.PodSucceeded,
+					},
+					wantActive: 0,
+					wantFailed: 0,
+					wantSucceeded: 1,
+					wantCompletedIndexes: "0",
+					wantTerminating: ptr.To[int32](0),
+				},
+			},
+			wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
+			wantJobFinishedNumMetric: []metricLabelsWithValue{
+				{
+					Labels: []string{"Indexed", "succeeded", "CompletionsReached"},
+					Value: 1,
+				},
+			},
+		},
 		"job with successPolicy with succeededIndexes; job has SuccessCriteriaMet and Complete conditions even if some indexes remain pending": {
 			enableJobSuccessPolicy: true,
 			job: batchv1.Job{
@@ -629,7 +660,7 @@ func TestSuccessPolicy(t *testing.T) {
 			wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
 			wantJobFinishedNumMetric: []metricLabelsWithValue{
 				{
-					Labels: []string{"Indexed", "succeeded", ""},
+					Labels: []string{"Indexed", "succeeded", "SuccessPolicy"},
 					Value: 1,
 				},
 			},
@@ -676,7 +707,7 @@ func TestSuccessPolicy(t *testing.T) {
 			wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
 			wantJobFinishedNumMetric: []metricLabelsWithValue{
 				{
-					Labels: []string{"Indexed", "succeeded", ""},
+					Labels: []string{"Indexed", "succeeded", "SuccessPolicy"},
 					Value: 1,
 				},
 			},
@@ -1185,6 +1216,7 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 	testCases := map[string]struct {
 		enableJobManagedBy bool
 		enableJobPodReplacementPolicy bool
+		enableJobSuccessPolicy bool
 
 		job batchv1.Job
 		action func(context.Context, clientset.Interface, *batchv1.Job)
@@ -1393,6 +1425,92 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 				},
 			},
 		},
+		"job scale down to meet completions; JobManagedBy and JobSuccessPolicy are enabled": {
+			enableJobManagedBy: true,
+			enableJobSuccessPolicy: true,
+			job: batchv1.Job{
+				Spec: batchv1.JobSpec{
+					Parallelism: ptr.To[int32](2),
+					Completions: ptr.To[int32](2),
+					CompletionMode: ptr.To(batchv1.IndexedCompletion),
+					Template: podTemplateSpec,
+				},
+			},
+			action: succeedOnePodAndScaleDown,
+			wantInterimStatus: &batchv1.JobStatus{
+				Succeeded: 1,
+				Ready: ptr.To[int32](0),
+				CompletedIndexes: "0",
+				Conditions: []batchv1.JobCondition{
+					{
+						Type: batchv1.JobSuccessCriteriaMet,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+				},
+			},
+			wantTerminalStatus: batchv1.JobStatus{
+				Succeeded: 1,
+				Ready: ptr.To[int32](0),
+				CompletedIndexes: "0",
+				Conditions: []batchv1.JobCondition{
+					{
+						Type: batchv1.JobSuccessCriteriaMet,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+					{
+						Type: batchv1.JobComplete,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+				},
+			},
+		},
"job scale down to meet completions; JobPodReplacementPolicy and JobSuccessPolicy are enabled": {
|
||||
enableJobPodReplacementPolicy: true,
|
||||
enableJobSuccessPolicy: true,
|
||||
job: batchv1.Job{
|
||||
Spec: batchv1.JobSpec{
|
||||
Parallelism: ptr.To[int32](2),
|
||||
Completions: ptr.To[int32](2),
|
||||
CompletionMode: ptr.To(batchv1.IndexedCompletion),
|
||||
Template: podTemplateSpec,
|
||||
},
|
||||
},
|
||||
action: succeedOnePodAndScaleDown,
|
||||
wantInterimStatus: &batchv1.JobStatus{
|
||||
Succeeded: 1,
|
||||
Ready: ptr.To[int32](0),
|
||||
Terminating: ptr.To[int32](1),
|
||||
CompletedIndexes: "0",
|
||||
Conditions: []batchv1.JobCondition{
|
||||
{
|
||||
Type: batchv1.JobSuccessCriteriaMet,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: batchv1.JobReasonCompletionsReached,
|
||||
},
|
||||
},
|
||||
},
|
||||
wantTerminalStatus: batchv1.JobStatus{
|
||||
Succeeded: 1,
|
||||
Ready: ptr.To[int32](0),
|
||||
Terminating: ptr.To[int32](0),
|
||||
CompletedIndexes: "0",
|
||||
Conditions: []batchv1.JobCondition{
|
||||
{
|
||||
Type: batchv1.JobSuccessCriteriaMet,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: batchv1.JobReasonCompletionsReached,
|
||||
},
|
||||
{
|
||||
Type: batchv1.JobComplete,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: batchv1.JobReasonCompletionsReached,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for name, test := range testCases {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
@@ -1400,6 +1518,7 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, test.enableJobManagedBy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.ElasticIndexedJob, true)
+			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, test.enableJobSuccessPolicy)
 
 			closeFn, restConfig, clientSet, ns := setup(t, "delay-terminal-condition")
 			t.Cleanup(closeFn)
@@ -2355,7 +2474,7 @@ func TestNonParallelJob(t *testing.T) {
 	})
 	validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
 	validateCounterMetric(ctx, t, metrics.JobFinishedNum, metricLabelsWithValue{
-		Labels: []string{"NonIndexed", "succeeded", ""},
+		Labels: []string{"NonIndexed", "succeeded", "CompletionsReached"},
 		Value: 1,
 	})
 	validateCounterMetric(ctx, t, metrics.JobPodsFinished, metricLabelsWithValue{
@@ -2443,7 +2562,7 @@ func TestParallelJob(t *testing.T) {
 	validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
 	validateTerminatedPodsTrackingFinalizerMetric(ctx, t, 7)
 	validateCounterMetric(ctx, t, metrics.JobFinishedNum, metricLabelsWithValue{
-		Labels: []string{"NonIndexed", "succeeded", ""},
+		Labels: []string{"NonIndexed", "succeeded", "CompletionsReached"},
 		Value: 1,
 	})
 	validateCounterMetric(ctx, t, metrics.JobPodsFinished, metricLabelsWithValue{
@@ -2585,7 +2704,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
 	validateJobPodsStatus(ctx, t, clientSet, jobObj, want)
 	validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
 	validateCounterMetric(ctx, t, metrics.JobFinishedNum, metricLabelsWithValue{
-		Labels: []string{"NonIndexed", "succeeded", ""},
+		Labels: []string{"NonIndexed", "succeeded", "CompletionsReached"},
 		Value: 1,
 	})
 	validateCounterMetric(ctx, t, metrics.JobPodsFinished, metricLabelsWithValue{
@@ -2681,7 +2800,7 @@ func TestIndexedJob(t *testing.T) {
 		Value: 4,
 	})
 	validateCounterMetric(ctx, t, metrics.JobFinishedNum, metricLabelsWithValue{
-		Labels: []string{"Indexed", "succeeded", ""},
+		Labels: []string{"Indexed", "succeeded", "CompletionsReached"},
 		Value: 1,
 	})
 	validateCounterMetric(ctx, t, metrics.JobPodsFinished, metricLabelsWithValue{