Job: Add the CompletionsReached reason to the SuccessCriteriaMet condition
Signed-off-by: Yuki Iwai <yuki.iwai.tz@gmail.com>
@@ -988,7 +988,12 @@ func (jm *Controller) newSuccessCondition() *batch.JobCondition {
 	if delayTerminalCondition() {
 		cType = batch.JobSuccessCriteriaMet
 	}
-	return newCondition(cType, v1.ConditionTrue, "", "", jm.clock.Now())
+	var reason, message string
+	if feature.DefaultFeatureGate.Enabled(features.JobSuccessPolicy) {
+		reason = batch.JobReasonCompletionsReached
+		message = "Reached expected number of succeeded pods"
+	}
+	return newCondition(cType, v1.ConditionTrue, reason, message, jm.clock.Now())
 }
 
 func delayTerminalCondition() bool {
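With this change, both the interim SuccessCriteriaMet condition and the terminal Complete condition carry a reason and message whenever the JobSuccessPolicy gate is on; with the gate off, reason and message stay empty as before. As a minimal illustration of the resulting shape (not controller code; it uses the batch/v1 and core/v1 types the controller already imports, and matches the expectations asserted in the tests below):

package example

import (
	batch "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
)

// The conditions a Job ends up with after reaching .spec.completions
// with JobSuccessPolicy enabled: first the interim condition, then the
// terminal one, both stamped with the new reason.
var expectedConditions = []batch.JobCondition{
	{
		Type:    batch.JobSuccessCriteriaMet,
		Status:  v1.ConditionTrue,
		Reason:  batch.JobReasonCompletionsReached,
		Message: "Reached expected number of succeeded pods",
	},
	{
		Type:    batch.JobComplete,
		Status:  v1.ConditionTrue,
		Reason:  batch.JobReasonCompletionsReached,
		Message: "Reached expected number of succeeded pods",
	},
}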
@@ -4991,6 +4991,45 @@ func TestSyncJobWithJobSuccessPolicy(t *testing.T) {
 				},
 			},
 		},
+		"job without successPolicy; jobSuccessPolicy is enabled; job got SuccessCriteriaMet and Completion with CompletionsReached reason conditions": {
+			enableJobSuccessPolicy: true,
+			enableJobManagedBy:     true,
+			job: batch.Job{
+				TypeMeta:   validTypeMeta,
+				ObjectMeta: validObjectMeta,
+				Spec: batch.JobSpec{
+					Selector:       validSelector,
+					Template:       validTemplate,
+					CompletionMode: ptr.To(batch.IndexedCompletion),
+					Completions:    ptr.To[int32](1),
+					Parallelism:    ptr.To[int32](1),
+					BackoffLimit:   ptr.To[int32](math.MaxInt32),
+				},
+			},
+			pods: []v1.Pod{
+				*buildPod().uid("a1").index("0").phase(v1.PodSucceeded).trackingFinalizer().Pod,
+			},
+			wantStatus: batch.JobStatus{
+				Failed:                  0,
+				Succeeded:               1,
+				CompletedIndexes:        "0",
+				UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
+				Conditions: []batch.JobCondition{
+					{
+						Type:    batch.JobSuccessCriteriaMet,
+						Status:  v1.ConditionTrue,
+						Reason:  batch.JobReasonCompletionsReached,
+						Message: "Reached expected number of succeeded pods",
+					},
+					{
+						Type:    batch.JobComplete,
+						Status:  v1.ConditionTrue,
+						Reason:  batch.JobReasonCompletionsReached,
+						Message: "Reached expected number of succeeded pods",
+					},
+				},
+			},
+		},
 		"when the JobSuccessPolicy is disabled, the Job never got SuccessCriteriaMet condition even if the Job has the successPolicy field": {
 			job: batch.Job{
 				TypeMeta: validTypeMeta,
@@ -651,6 +651,11 @@ const (
 	// https://kep.k8s.io/3998
 	// This is currently an alpha field.
 	JobReasonSuccessPolicy string = "SuccessPolicy"
+	// JobReasonCompletionsReached reason indicates a SuccessCriteriaMet condition is added due to
+	// a number of succeeded Job pods met completions.
+	// - https://kep.k8s.io/3998
+	// This is currently a beta field.
+	JobReasonCompletionsReached string = "CompletionsReached"
 )
 
 // JobCondition describes current state of a job.
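On the consumer side, the two reasons let a controller or CLI tell why a Job's success criteria were met: CompletionsReached for the ordinary completions path, SuccessPolicy when a .spec.successPolicy rule matched. A minimal sketch, assuming standard batch/v1 and core/v1 import aliases (the helper name is mine, not part of the API):

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
)

// successReason reports why a Job's success criteria were met:
// "CompletionsReached", "SuccessPolicy", or "" if the condition is absent.
func successReason(job *batchv1.Job) string {
	for _, c := range job.Status.Conditions {
		if c.Type == batchv1.JobSuccessCriteriaMet && c.Status == v1.ConditionTrue {
			return c.Reason
		}
	}
	return ""
}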
@@ -710,7 +710,7 @@ done`}
 	framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 
 	ginkgo.By("Awaiting for the job to have the interim success condition")
-	err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, "")
+	err = e2ejob.WaitForJobCondition(ctx, f.ClientSet, f.Namespace.Name, job.Name, batchv1.JobSuccessCriteriaMet, batchv1.JobReasonCompletionsReached)
 	framework.ExpectNoError(err, "failed to ensure job has the interim success condition: %s", f.Namespace.Name)
 
 	ginkgo.By("Ensuring job reaches completions")
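The e2e assertion now pins the interim condition to the new reason instead of accepting any (the previous "" argument matched any reason). For orientation, a minimal sketch of what such a wait boils down to, assuming a plain client-go clientset; the real e2ejob.WaitForJobCondition lives in the e2e framework and differs in details:

package example

import (
	"context"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForJobConditionReason polls until the Job has a True condition of
// the given type; an empty reason matches any reason.
func waitForJobConditionReason(ctx context.Context, c kubernetes.Interface, ns, name string, cType batchv1.JobConditionType, reason string) error {
	return wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
		job, err := c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, cond := range job.Status.Conditions {
			if cond.Type == cType && cond.Status == v1.ConditionTrue && (reason == "" || cond.Reason == reason) {
				return true, nil
			}
		}
		return false, nil
	})
}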
@@ -511,10 +511,10 @@ func TestSuccessPolicy(t *testing.T) {
 	testCases := map[string]struct {
 		enableJobSuccessPolicy     bool
 		enableBackoffLimitPerIndex bool
-		job                        batchv1.Job
-		podTerminations            []podTerminationWithExpectations
-		wantConditionTypes         []batchv1.JobConditionType
-		wantJobFinishedNumMetric   []metricLabelsWithValue
+		job                        batchv1.Job
+		podTerminations            []podTerminationWithExpectations
+		wantConditionTypes         []batchv1.JobConditionType
+		wantJobFinishedNumMetric   []metricLabelsWithValue
 	}{
 		"all indexes succeeded; JobSuccessPolicy is enabled": {
 			enableJobSuccessPolicy: true,
@@ -1185,6 +1185,7 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 	testCases := map[string]struct {
 		enableJobManagedBy            bool
 		enableJobPodReplacementPolicy bool
+		enableJobSuccessPolicy        bool
 
 		job    batchv1.Job
 		action func(context.Context, clientset.Interface, *batchv1.Job)
@@ -1393,6 +1394,92 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 				},
 			},
 		},
+		"job scale down to meet completions; JobManagedBy and JobSuccessPolicy are enabled": {
+			enableJobManagedBy:     true,
+			enableJobSuccessPolicy: true,
+			job: batchv1.Job{
+				Spec: batchv1.JobSpec{
+					Parallelism:    ptr.To[int32](2),
+					Completions:    ptr.To[int32](2),
+					CompletionMode: ptr.To(batchv1.IndexedCompletion),
+					Template:       podTemplateSpec,
+				},
+			},
+			action: succeedOnePodAndScaleDown,
+			wantInterimStatus: &batchv1.JobStatus{
+				Succeeded:        1,
+				Ready:            ptr.To[int32](0),
+				CompletedIndexes: "0",
+				Conditions: []batchv1.JobCondition{
+					{
+						Type:   batchv1.JobSuccessCriteriaMet,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+				},
+			},
+			wantTerminalStatus: batchv1.JobStatus{
+				Succeeded:        1,
+				Ready:            ptr.To[int32](0),
+				CompletedIndexes: "0",
+				Conditions: []batchv1.JobCondition{
+					{
+						Type:   batchv1.JobSuccessCriteriaMet,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+					{
+						Type:   batchv1.JobComplete,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+				},
+			},
+		},
+		"job scale down to meet completions; JobPodReplacementPolicy and JobSuccessPolicy are enabled": {
+			enableJobPodReplacementPolicy: true,
+			enableJobSuccessPolicy:        true,
+			job: batchv1.Job{
+				Spec: batchv1.JobSpec{
+					Parallelism:    ptr.To[int32](2),
+					Completions:    ptr.To[int32](2),
+					CompletionMode: ptr.To(batchv1.IndexedCompletion),
+					Template:       podTemplateSpec,
+				},
+			},
+			action: succeedOnePodAndScaleDown,
+			wantInterimStatus: &batchv1.JobStatus{
+				Succeeded:        1,
+				Ready:            ptr.To[int32](0),
+				Terminating:      ptr.To[int32](1),
+				CompletedIndexes: "0",
+				Conditions: []batchv1.JobCondition{
+					{
+						Type:   batchv1.JobSuccessCriteriaMet,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+				},
+			},
+			wantTerminalStatus: batchv1.JobStatus{
+				Succeeded:        1,
+				Ready:            ptr.To[int32](0),
+				Terminating:      ptr.To[int32](0),
+				CompletedIndexes: "0",
+				Conditions: []batchv1.JobCondition{
+					{
+						Type:   batchv1.JobSuccessCriteriaMet,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+					{
+						Type:   batchv1.JobComplete,
+						Status: v1.ConditionTrue,
+						Reason: batchv1.JobReasonCompletionsReached,
+					},
+				},
+			},
+		},
 	}
 	for name, test := range testCases {
 		t.Run(name, func(t *testing.T) {
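The two new cases exercise a scale-down path: one pod succeeds, then the Job shrinks so that the single recorded success satisfies .spec.completions, which must now surface CompletionsReached on both the interim and terminal conditions. succeedOnePodAndScaleDown is defined elsewhere in this test file; the following is only a rough sketch of the scenario it drives, under stated assumptions (pod selection via the completion-index annotation, ElasticIndexedJob allowing completions to be mutated alongside parallelism), not the actual helper:

package example

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/utils/ptr"
)

// succeedOnePodAndScaleDownSketch marks the index-0 pod of the Job as
// succeeded, then scales the Job from 2 down to 1 so the one success
// already meets .spec.completions.
func succeedOnePodAndScaleDownSketch(ctx context.Context, cs clientset.Interface, job *batchv1.Job) error {
	pods, err := cs.CoreV1().Pods(job.Namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range pods.Items {
		pod := &pods.Items[i]
		if metav1.IsControlledBy(pod, job) && pod.Annotations[batchv1.JobCompletionIndexAnnotation] == "0" {
			pod.Status.Phase = v1.PodSucceeded
			if _, err := cs.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
				return err
			}
			break
		}
	}
	// Scale down: with ElasticIndexedJob, parallelism and completions of
	// an indexed Job can be mutated together.
	fresh, err := cs.BatchV1().Jobs(job.Namespace).Get(ctx, job.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fresh.Spec.Parallelism = ptr.To[int32](1)
	fresh.Spec.Completions = ptr.To[int32](1)
	_, err = cs.BatchV1().Jobs(job.Namespace).Update(ctx, fresh, metav1.UpdateOptions{})
	return err
}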
@@ -1400,6 +1487,7 @@ func TestDelayTerminalPhaseCondition(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodReplacementPolicy, test.enableJobPodReplacementPolicy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobManagedBy, test.enableJobManagedBy)
 			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.ElasticIndexedJob, true)
+			featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, test.enableJobSuccessPolicy)
 
 			closeFn, restConfig, clientSet, ns := setup(t, "delay-terminal-condition")
 			t.Cleanup(closeFn)