Merge pull request #123412 from tenzen-y/add-new-jobsuccesspolicy-api

Job: Support for the SuccessPolicy
This commit is contained in:
Kubernetes Prow Robot
2024-03-07 14:49:20 -08:00
committed by GitHub
35 changed files with 3874 additions and 149 deletions

View File

@@ -4781,6 +4781,10 @@
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector",
"description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" "description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors"
}, },
"successPolicy": {
"$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicy",
"description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default)."
},
"suspend": { "suspend": {
"description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
"type": "boolean" "type": "boolean"
@@ -4962,6 +4966,38 @@
],
"type": "object"
},
"io.k8s.api.batch.v1.SuccessPolicy": {
"description": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
"properties": {
"rules": {
"description": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
"items": {
"$ref": "#/definitions/io.k8s.api.batch.v1.SuccessPolicyRule"
},
"type": "array",
"x-kubernetes-list-type": "atomic"
}
},
"required": [
"rules"
],
"type": "object"
},
"io.k8s.api.batch.v1.SuccessPolicyRule": {
"description": "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.",
"properties": {
"succeededCount": {
"description": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.",
"format": "int32",
"type": "integer"
},
"succeededIndexes": {
"description": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.",
"type": "string"
}
},
"type": "object"
},
"io.k8s.api.batch.v1.UncountedTerminatedPods": { "io.k8s.api.batch.v1.UncountedTerminatedPods": {
"description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", "description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.",
"properties": { "properties": {

View File

@@ -382,6 +382,14 @@
],
"description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors"
},
"successPolicy": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.batch.v1.SuccessPolicy"
}
],
"description": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default)."
},
"suspend": { "suspend": {
"description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.",
"type": "boolean" "type": "boolean"
@@ -614,6 +622,43 @@
],
"type": "object"
},
"io.k8s.api.batch.v1.SuccessPolicy": {
"description": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
"properties": {
"rules": {
"description": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
"items": {
"allOf": [
{
"$ref": "#/components/schemas/io.k8s.api.batch.v1.SuccessPolicyRule"
}
],
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "atomic"
}
},
"required": [
"rules"
],
"type": "object"
},
"io.k8s.api.batch.v1.SuccessPolicyRule": {
"description": "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.",
"properties": {
"succeededCount": {
"description": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.",
"format": "int32",
"type": "integer"
},
"succeededIndexes": {
"description": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.",
"type": "string"
}
},
"type": "object"
},
"io.k8s.api.batch.v1.UncountedTerminatedPods": { "io.k8s.api.batch.v1.UncountedTerminatedPods": {
"description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", "description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.",
"properties": { "properties": {

View File

@@ -259,6 +259,51 @@ type PodFailurePolicy struct {
Rules []PodFailurePolicyRule
}
// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.
type SuccessPolicy struct {
// rules represents the list of alternative rules for declaring the Job
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
// +listType=atomic
Rules []SuccessPolicyRule
}
// SuccessPolicyRule describes a rule for declaring a Job as succeeded.
// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
type SuccessPolicyRule struct {
// succeededIndexes specifies the set of indexes
// which need to be contained in the actual set of the succeeded indexes for the Job.
// The list of indexes must be within 0 to ".spec.completions-1" and
// must not contain duplicates. At least one element is required.
// The indexes are represented as intervals separated by commas.
// An interval can be a decimal integer or a pair of decimal integers separated by a hyphen;
// a pair lists the first and last elements of the series,
// separated by a hyphen.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// When this field is null, this field doesn't default to any value
// and is never evaluated at any time.
//
// +optional
SucceededIndexes *string
// succeededCount specifies the minimal required size of the actual set of the succeeded indexes
// for the Job. When succeededCount is used along with succeededIndexes, the check is
// constrained only to the set of indexes specified by succeededIndexes.
// For example, given that succeededIndexes is "1-4", succeededCount is "3",
// and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
// because only "1" and "3" indexes are considered in that rules.
// When this field is null, this doesn't default to any value and
// is never evaluated at any time.
// When specified, it needs to be a positive integer.
//
// +optional
SucceededCount *int32
}
// JobSpec describes how the job execution will look like.
type JobSpec struct {
@@ -290,6 +335,17 @@ type JobSpec struct {
// +optional
PodFailurePolicy *PodFailurePolicy
// successPolicy specifies the policy for when the Job can be declared as succeeded.
// If empty, the default behavior applies - the Job is declared as succeeded
// only when the number of succeeded pods equals the completions.
// When the field is specified, it must be immutable and works only for Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
// This field is alpha-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (disabled by default).
// +optional
SuccessPolicy *SuccessPolicy
// Specifies the duration in seconds relative to the startTime that the job
// may be continuously active before the system tries to terminate it; value
// must be positive integer. If a Job is suspended (at creation or through an
@@ -569,6 +625,8 @@ const (
JobFailed JobConditionType = "Failed"
// FailureTarget means the job is about to fail its execution.
JobFailureTarget JobConditionType = "FailureTarget"
// JobSuccessCriteriaMet means the Job has reached a success state and will be marked as Completed.
JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet"
)
// JobCondition describes current state of a job.
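
Because the interplay between succeededIndexes and succeededCount is easy to misread, here is a small evaluator sketch of the documented semantics (not the controller's actual matchSuccessPolicy implementation). The names ruleIndexes (the already-parsed set named by a rule's succeededIndexes) and actual (the set of indexes that have succeeded) are hypothetical.

// ruleMet reports whether one SuccessPolicyRule is satisfied.
// ruleIndexes == nil means the rule sets only succeededCount.
func ruleMet(ruleIndexes map[int32]bool, succeededCount *int32, actual map[int32]bool) bool {
	matched := int32(0)
	for ix := range actual {
		// Only succeeded indexes named by the rule count; with no
		// succeededIndexes, every succeeded index counts.
		if ruleIndexes == nil || ruleIndexes[ix] {
			matched++
		}
	}
	if succeededCount != nil {
		return matched >= *succeededCount
	}
	// Only succeededIndexes is set: all named indexes must have succeeded.
	return matched == int32(len(ruleIndexes))
}

With the example from the field comments above - succeededIndexes "1-4", succeededCount 3, succeeded set {1, 3, 5} - matched is 2, so the rule is not met.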

View File

@@ -172,6 +172,26 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.SuccessPolicy)(nil), (*batch.SuccessPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SuccessPolicy_To_batch_SuccessPolicy(a.(*v1.SuccessPolicy), b.(*batch.SuccessPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.SuccessPolicy)(nil), (*v1.SuccessPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_SuccessPolicy_To_v1_SuccessPolicy(a.(*batch.SuccessPolicy), b.(*v1.SuccessPolicy), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.SuccessPolicyRule)(nil), (*batch.SuccessPolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(a.(*v1.SuccessPolicyRule), b.(*batch.SuccessPolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*batch.SuccessPolicyRule)(nil), (*v1.SuccessPolicyRule)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(a.(*batch.SuccessPolicyRule), b.(*v1.SuccessPolicyRule), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1.UncountedTerminatedPods)(nil), (*batch.UncountedTerminatedPods)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods(a.(*v1.UncountedTerminatedPods), b.(*batch.UncountedTerminatedPods), scope)
}); err != nil {
@@ -440,6 +460,7 @@ func autoConvert_v1_JobSpec_To_batch_JobSpec(in *v1.JobSpec, out *batch.JobSpec,
out.Completions = (*int32)(unsafe.Pointer(in.Completions))
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
out.PodFailurePolicy = (*batch.PodFailurePolicy)(unsafe.Pointer(in.PodFailurePolicy))
out.SuccessPolicy = (*batch.SuccessPolicy)(unsafe.Pointer(in.SuccessPolicy))
out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit))
out.BackoffLimitPerIndex = (*int32)(unsafe.Pointer(in.BackoffLimitPerIndex))
out.MaxFailedIndexes = (*int32)(unsafe.Pointer(in.MaxFailedIndexes))
@@ -460,6 +481,7 @@ func autoConvert_batch_JobSpec_To_v1_JobSpec(in *batch.JobSpec, out *v1.JobSpec,
out.Parallelism = (*int32)(unsafe.Pointer(in.Parallelism))
out.Completions = (*int32)(unsafe.Pointer(in.Completions))
out.PodFailurePolicy = (*v1.PodFailurePolicy)(unsafe.Pointer(in.PodFailurePolicy))
out.SuccessPolicy = (*v1.SuccessPolicy)(unsafe.Pointer(in.SuccessPolicy))
out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
out.BackoffLimit = (*int32)(unsafe.Pointer(in.BackoffLimit))
out.BackoffLimitPerIndex = (*int32)(unsafe.Pointer(in.BackoffLimitPerIndex))
@@ -633,6 +655,48 @@ func Convert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule(in *batch.Pod
return autoConvert_batch_PodFailurePolicyRule_To_v1_PodFailurePolicyRule(in, out, s)
}
func autoConvert_v1_SuccessPolicy_To_batch_SuccessPolicy(in *v1.SuccessPolicy, out *batch.SuccessPolicy, s conversion.Scope) error {
out.Rules = *(*[]batch.SuccessPolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_v1_SuccessPolicy_To_batch_SuccessPolicy is an autogenerated conversion function.
func Convert_v1_SuccessPolicy_To_batch_SuccessPolicy(in *v1.SuccessPolicy, out *batch.SuccessPolicy, s conversion.Scope) error {
return autoConvert_v1_SuccessPolicy_To_batch_SuccessPolicy(in, out, s)
}
func autoConvert_batch_SuccessPolicy_To_v1_SuccessPolicy(in *batch.SuccessPolicy, out *v1.SuccessPolicy, s conversion.Scope) error {
out.Rules = *(*[]v1.SuccessPolicyRule)(unsafe.Pointer(&in.Rules))
return nil
}
// Convert_batch_SuccessPolicy_To_v1_SuccessPolicy is an autogenerated conversion function.
func Convert_batch_SuccessPolicy_To_v1_SuccessPolicy(in *batch.SuccessPolicy, out *v1.SuccessPolicy, s conversion.Scope) error {
return autoConvert_batch_SuccessPolicy_To_v1_SuccessPolicy(in, out, s)
}
func autoConvert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(in *v1.SuccessPolicyRule, out *batch.SuccessPolicyRule, s conversion.Scope) error {
out.SucceededIndexes = (*string)(unsafe.Pointer(in.SucceededIndexes))
out.SucceededCount = (*int32)(unsafe.Pointer(in.SucceededCount))
return nil
}
// Convert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule is an autogenerated conversion function.
func Convert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(in *v1.SuccessPolicyRule, out *batch.SuccessPolicyRule, s conversion.Scope) error {
return autoConvert_v1_SuccessPolicyRule_To_batch_SuccessPolicyRule(in, out, s)
}
func autoConvert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(in *batch.SuccessPolicyRule, out *v1.SuccessPolicyRule, s conversion.Scope) error {
out.SucceededIndexes = (*string)(unsafe.Pointer(in.SucceededIndexes))
out.SucceededCount = (*int32)(unsafe.Pointer(in.SucceededCount))
return nil
}
// Convert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule is an autogenerated conversion function.
func Convert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(in *batch.SuccessPolicyRule, out *v1.SuccessPolicyRule, s conversion.Scope) error {
return autoConvert_batch_SuccessPolicyRule_To_v1_SuccessPolicyRule(in, out, s)
}
func autoConvert_v1_UncountedTerminatedPods_To_batch_UncountedTerminatedPods(in *v1.UncountedTerminatedPods, out *batch.UncountedTerminatedPods, s conversion.Scope) error {
out.Succeeded = *(*[]types.UID)(unsafe.Pointer(&in.Succeeded))
out.Failed = *(*[]types.UID)(unsafe.Pointer(&in.Failed))

View File

@@ -66,6 +66,11 @@ const (
// maximum length of the value of the managedBy field
maxManagedByLength = 63
// maximum length of succeededIndexes in JobSuccessPolicy.
maxJobSuccessPolicySucceededIndexesLimit = 64 * 1024
// maximum number of rules in successPolicy.
maxSuccessPolicyRule = 20
)
var (
@@ -259,6 +264,13 @@ func validateJobSpec(spec *batch.JobSpec, fldPath *field.Path, opts apivalidatio
if spec.PodFailurePolicy != nil {
allErrs = append(allErrs, validatePodFailurePolicy(spec, fldPath.Child("podFailurePolicy"))...)
}
if spec.SuccessPolicy != nil {
if ptr.Deref(spec.CompletionMode, batch.NonIndexedCompletion) != batch.IndexedCompletion {
allErrs = append(allErrs, field.Invalid(fldPath.Child("successPolicy"), *spec.SuccessPolicy, "requires indexed completion mode"))
} else {
allErrs = append(allErrs, validateSuccessPolicy(spec, fldPath.Child("successPolicy"))...)
}
}
allErrs = append(allErrs, validatePodReplacementPolicy(spec, fldPath.Child("podReplacementPolicy"))...)
@@ -400,6 +412,50 @@ func validatePodFailurePolicyRuleOnExitCodes(onExitCode *batch.PodFailurePolicyO
return allErrs
}
func validateSuccessPolicy(spec *batch.JobSpec, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
rulesPath := fldPath.Child("rules")
if len(spec.SuccessPolicy.Rules) == 0 {
allErrs = append(allErrs, field.Required(rulesPath, "at least one rule must be specified when the successPolicy is specified"))
}
if len(spec.SuccessPolicy.Rules) > maxSuccessPolicyRule {
allErrs = append(allErrs, field.TooMany(rulesPath, len(spec.SuccessPolicy.Rules), maxSuccessPolicyRule))
}
for i, rule := range spec.SuccessPolicy.Rules {
allErrs = append(allErrs, validateSuccessPolicyRule(spec, &rule, rulesPath.Index(i))...)
}
return allErrs
}
func validateSuccessPolicyRule(spec *batch.JobSpec, rule *batch.SuccessPolicyRule, rulePath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if rule.SucceededCount == nil && rule.SucceededIndexes == nil {
allErrs = append(allErrs, field.Required(rulePath, "at least one of succeededCount or succeededIndexes must be specified"))
}
var totalIndexes int32
if rule.SucceededIndexes != nil {
succeededIndexes := rulePath.Child("succeededIndexes")
if len(*rule.SucceededIndexes) > maxJobSuccessPolicySucceededIndexesLimit {
allErrs = append(allErrs, field.TooLong(succeededIndexes, *rule.SucceededIndexes, maxJobSuccessPolicySucceededIndexesLimit))
}
var err error
if totalIndexes, err = validateIndexesFormat(*rule.SucceededIndexes, *spec.Completions); err != nil {
allErrs = append(allErrs, field.Invalid(succeededIndexes, *rule.SucceededIndexes, fmt.Sprintf("error parsing succeededIndexes: %s", err.Error())))
}
}
if rule.SucceededCount != nil {
succeededCountPath := rulePath.Child("succeededCount")
allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*rule.SucceededCount), succeededCountPath)...)
if *rule.SucceededCount > *spec.Completions {
allErrs = append(allErrs, field.Invalid(succeededCountPath, *rule.SucceededCount, fmt.Sprintf("must be less than or equal to %d (the number of specified completions)", *spec.Completions)))
}
if rule.SucceededIndexes != nil && *rule.SucceededCount > totalIndexes {
allErrs = append(allErrs, field.Invalid(succeededCountPath, *rule.SucceededCount, fmt.Sprintf("must be less than or equal to %d (the number of indexes in the specified succeededIndexes field)", totalIndexes)))
}
}
return allErrs
}
// validateJobStatus validates a JobStatus and returns an ErrorList with any errors.
func validateJobStatus(job *batch.Job, fldPath *field.Path, opts JobStatusValidationOptions) field.ErrorList {
allErrs := field.ErrorList{}
@@ -485,14 +541,14 @@ func validateJobStatus(job *batch.Job, fldPath *field.Path, opts JobStatusValida
}
if opts.RejectInvalidCompletedIndexes {
if job.Spec.Completions != nil {
if _, err := validateIndexesFormat(status.CompletedIndexes, int32(*job.Spec.Completions)); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("completedIndexes"), status.CompletedIndexes, fmt.Sprintf("error parsing completedIndexes: %s", err.Error())))
}
}
}
if opts.RejectInvalidFailedIndexes {
if job.Spec.Completions != nil && job.Spec.BackoffLimitPerIndex != nil && status.FailedIndexes != nil {
if _, err := validateIndexesFormat(*status.FailedIndexes, int32(*job.Spec.Completions)); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("failedIndexes"), status.FailedIndexes, fmt.Sprintf("error parsing failedIndexes: %s", err.Error())))
}
}
@@ -522,6 +578,21 @@ func validateJobStatus(job *batch.Job, fldPath *field.Path, opts JobStatusValida
}
}
}
if ptr.Deref(job.Spec.CompletionMode, batch.NonIndexedCompletion) != batch.IndexedCompletion && isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet to NonIndexed Job"))
}
if isJobSuccessCriteriaMet(job) && IsJobFailed(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True and Failed=true conditions"))
}
if isJobSuccessCriteriaMet(job) && isJobFailureTarget(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True and FailureTarget=true conditions"))
}
if job.Spec.SuccessPolicy == nil && isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True for Job without SuccessPolicy"))
}
if job.Spec.SuccessPolicy != nil && !isJobSuccessCriteriaMet(job) && IsJobComplete(job) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("conditions"), field.OmitValueType{}, "cannot set Complete=True for Job with SuccessPolicy unless SuccessCriteriaMet=True"))
}
return allErrs
}
@@ -550,6 +621,7 @@ func ValidateJobSpecUpdate(spec, oldSpec batch.JobSpec, fldPath *field.Path, opt
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.PodFailurePolicy, oldSpec.PodFailurePolicy, fldPath.Child("podFailurePolicy"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.BackoffLimitPerIndex, oldSpec.BackoffLimitPerIndex, fldPath.Child("backoffLimitPerIndex"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.ManagedBy, oldSpec.ManagedBy, fldPath.Child("managedBy"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(spec.SuccessPolicy, oldSpec.SuccessPolicy, fldPath.Child("successPolicy"))...)
return allErrs
}
@@ -621,6 +693,12 @@ func ValidateJobStatusUpdate(job, oldJob *batch.Job, opts JobStatusValidationOpt
allErrs = append(allErrs, field.Required(statusFld.Child("startTime"), "startTime cannot be removed for unsuspended job")) allErrs = append(allErrs, field.Required(statusFld.Child("startTime"), "startTime cannot be removed for unsuspended job"))
} }
} }
if isJobSuccessCriteriaMet(oldJob) && !isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(statusFld.Child("conditions"), field.OmitValueType{}, "cannot disable the SuccessCriteriaMet=True condition"))
}
if IsJobComplete(oldJob) && !isJobSuccessCriteriaMet(oldJob) && isJobSuccessCriteriaMet(job) {
allErrs = append(allErrs, field.Invalid(statusFld.Child("conditions"), field.OmitValueType{}, "cannot set SuccessCriteriaMet=True for Job already has Complete=true conditions"))
}
return allErrs
}
@@ -815,6 +893,14 @@ func IsJobFailed(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobFailed)
}
func isJobSuccessCriteriaMet(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobSuccessCriteriaMet)
}
func isJobFailureTarget(job *batch.Job) bool {
return IsConditionTrue(job.Status.Conditions, batch.JobFailureTarget)
}
func IsConditionTrue(list []batch.JobCondition, cType batch.JobConditionType) bool {
for _, c := range list {
if c.Type == cType && c.Status == api.ConditionTrue {
@@ -870,22 +956,24 @@ func validateFailedIndexesNotOverlapCompleted(completedIndexesStr string, failed
return nil
}
func validateIndexesFormat(indexesStr string, completions int32) (int32, error) {
if len(indexesStr) == 0 {
return 0, nil
}
var lastIndex *int32
var total int32
for _, intervalStr := range strings.Split(indexesStr, ",") {
x, y, err := parseIndexInterval(intervalStr, completions)
if err != nil {
return 0, err
}
if lastIndex != nil && *lastIndex >= x {
return 0, fmt.Errorf("non-increasing order, previous: %d, current: %d", *lastIndex, x)
}
total += y - x + 1
lastIndex = &y
}
return total, nil
}
func parseIndexInterval(intervalStr string, completions int32) (int32, int32, error) {
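
The total that validateIndexesFormat now returns is just the sum of interval widths. Below is a self-contained sketch of that bookkeeping under simplified error handling (the authoritative parsing and messages live in parseIndexInterval above); countIndexes is a hypothetical name.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// countIndexes mirrors the counting that validateIndexesFormat performs:
// each comma-separated interval "x" or "x-y" must be in-range and strictly
// increasing, and contributes y-x+1 indexes to the total.
func countIndexes(indexesStr string, completions int32) (int32, error) {
	if indexesStr == "" {
		return 0, nil
	}
	var total int32
	last := int64(-1)
	for _, interval := range strings.Split(indexesStr, ",") {
		parts := strings.Split(interval, "-")
		if len(parts) > 2 {
			return 0, fmt.Errorf("interval %q has more than two parts", interval)
		}
		x, err := strconv.ParseInt(parts[0], 10, 32)
		if err != nil {
			return 0, fmt.Errorf("cannot parse index %q", parts[0])
		}
		y := x
		if len(parts) == 2 {
			if y, err = strconv.ParseInt(parts[1], 10, 32); err != nil {
				return 0, fmt.Errorf("cannot parse index %q", parts[1])
			}
			if y <= x {
				return 0, fmt.Errorf("non-increasing interval %q", interval)
			}
		}
		if x <= last || y >= int64(completions) {
			return 0, fmt.Errorf("invalid interval %q", interval)
		}
		total += int32(y - x + 1)
		last = y
	}
	return total, nil
}

func main() {
	fmt.Println(countIndexes("0,1-3,5,7-10", 12)) // 9 <nil>, matching the test table below
}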

View File

@@ -119,6 +119,29 @@ func TestValidateJob(t *testing.T) {
opts JobValidationOptions
job batch.Job
}{
"valid success policy": {
opts: JobValidationOptions{RequirePrefixedLabels: true},
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{
{
SucceededCount: ptr.To[int32](1),
SucceededIndexes: ptr.To("0,2,4"),
},
{
SucceededIndexes: ptr.To("1,3,5-9"),
},
},
},
},
},
},
"valid pod failure policy": { "valid pod failure policy": {
opts: JobValidationOptions{RequirePrefixedLabels: true}, opts: JobValidationOptions{RequirePrefixedLabels: true},
job: batch.Job{ job: batch.Job{
@@ -429,6 +452,159 @@ func TestValidateJob(t *testing.T) {
},
},
},
`spec.successPolicy: Invalid value: batch.SuccessPolicy{Rules:[]batch.SuccessPolicyRule{}}: requires indexed completion mode`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules: Required value: at least one rule must be specified when the successPolicy is specified`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules[0]: Required value: at least one of succeededCount or succeededIndexes must be specified`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: nil,
SucceededIndexes: nil,
}},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules[0].succeededIndexes: Invalid value: "invalid-format": error parsing succeededIndexes: cannot convert string to integer for index: "invalid"`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("invalid-format"),
}},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules[0].succeededIndexes: Too long: must have at most 65536 bytes`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To(strings.Repeat("1", maxJobSuccessPolicySucceededIndexesLimit+1)),
}},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules[0].succeededCount: must be greater than or equal to 0`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](-1),
}},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules[0].succeededCount: Invalid value: 6: must be less than or equal to 5 (the number of specified completions)`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](6),
}},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules[0].succeededCount: Invalid value: 4: must be less than or equal to 3 (the number of indexes in the specified succeededIndexes field)`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](4),
SucceededIndexes: ptr.To("0-2"),
}},
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.successPolicy.rules: Too many: 21: must have at most 20 items`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
Spec: batch.JobSpec{
Selector: validGeneratedSelector,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: func() []batch.SuccessPolicyRule {
var rules []batch.SuccessPolicyRule
for i := 0; i < 21; i++ {
rules = append(rules, batch.SuccessPolicyRule{
SucceededCount: ptr.To[int32](5),
})
}
return rules
}(),
},
},
},
opts: JobValidationOptions{RequirePrefixedLabels: true},
},
`spec.podFailurePolicy.rules[0]: Invalid value: specifying one of OnExitCodes and OnPodConditions is required`: {
job: batch.Job{
ObjectMeta: validJobObjectMeta,
@@ -1465,6 +1641,76 @@ func TestValidateJobUpdate(t *testing.T) {
Field: "spec.selector", Field: "spec.selector",
}, },
}, },
"add success policy": {
old: batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault},
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Selector: validGeneratedSelector,
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
},
},
update: func(job *batch.Job) {
job.Spec.SuccessPolicy = &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](2),
}},
}
},
err: &field.Error{
Type: field.ErrorTypeInvalid,
Field: "spec.successPolicy",
},
},
"update success policy": {
old: batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault},
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Selector: validGeneratedSelector,
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("1-3"),
}},
},
},
},
update: func(job *batch.Job) {
job.Spec.SuccessPolicy.Rules = append(job.Spec.SuccessPolicy.Rules, batch.SuccessPolicyRule{
SucceededCount: ptr.To[int32](3),
})
},
err: &field.Error{
Type: field.ErrorTypeInvalid,
Field: "spec.successPolicy",
},
},
"remove success policy": {
old: batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault},
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](5),
Selector: validGeneratedSelector,
Template: validPodTemplateSpecForGeneratedRestartPolicyNever,
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("1-3"),
}},
},
},
},
update: func(job *batch.Job) {
job.Spec.SuccessPolicy = nil
},
err: &field.Error{
Type: field.ErrorTypeInvalid,
Field: "spec.successPolicy",
},
},
"add pod failure policy": { "add pod failure policy": {
old: batch.Job{ old: batch.Job{
ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault}, ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault},
@@ -3662,74 +3908,88 @@ func TestValidateIndexesString(t *testing.T) {
testCases := map[string]struct {
indexesString string
completions int32
wantTotal int32
wantError error
}{
"empty is valid": {
indexesString: "",
completions: 6,
wantTotal: 0,
},
"single number is valid": {
indexesString: "1",
completions: 6,
wantTotal: 1,
},
"single interval is valid": {
indexesString: "1-3",
completions: 6,
wantTotal: 3,
},
"mixed intervals valid": {
indexesString: "0,1-3,5,7-10",
completions: 12,
wantTotal: 9,
},
"invalid due to extra space": {
indexesString: "0,1-3, 5",
completions: 6,
wantTotal: 0,
wantError: errors.New(`cannot convert string to integer for index: " 5"`),
},
"invalid due to too large index": {
indexesString: "0,1-3,5",
completions: 5,
wantTotal: 0,
wantError: errors.New(`too large index: "5"`),
},
"invalid due to non-increasing order of intervals": {
indexesString: "1-3,0,5",
completions: 6,
wantTotal: 0,
wantError: errors.New(`non-increasing order, previous: 3, current: 0`),
},
"invalid due to non-increasing order between intervals": {
indexesString: "0,0,5",
completions: 6,
wantTotal: 0,
wantError: errors.New(`non-increasing order, previous: 0, current: 0`),
},
"invalid due to non-increasing order within interval": {
indexesString: "0,1-1,5",
completions: 6,
wantTotal: 0,
wantError: errors.New(`non-increasing order, previous: 1, current: 1`),
},
"invalid due to starting with '-'": {
indexesString: "-1,0",
completions: 6,
wantTotal: 0,
wantError: errors.New(`cannot convert string to integer for index: ""`),
},
"invalid due to ending with '-'": {
indexesString: "0,1-",
completions: 6,
wantTotal: 0,
wantError: errors.New(`cannot convert string to integer for index: ""`),
},
"invalid due to repeated '-'": {
indexesString: "0,1--3",
completions: 6,
wantTotal: 0,
wantError: errors.New(`the fragment "1--3" violates the requirement that an index interval can have at most two parts separated by '-'`),
},
"invalid due to repeated ','": {
indexesString: "0,,1,3",
completions: 6,
wantTotal: 0,
wantError: errors.New(`cannot convert string to integer for index: ""`),
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
gotTotal, gotErr := validateIndexesFormat(tc.indexesString, tc.completions)
if tc.wantError == nil && gotErr != nil {
t.Errorf("unexpected error: %s", gotErr)
} else if tc.wantError != nil && gotErr == nil {
@@ -3739,6 +3999,9 @@ func TestValidateIndexesString(t *testing.T) {
t.Errorf("unexpected error, diff: %s", diff) t.Errorf("unexpected error, diff: %s", diff)
} }
} }
if tc.wantTotal != gotTotal {
t.Errorf("unexpected total want:%d, got:%d", tc.wantTotal, gotTotal)
}
})
}
}

View File

@@ -257,6 +257,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = new(PodFailurePolicy)
(*in).DeepCopyInto(*out)
}
if in.SuccessPolicy != nil {
in, out := &in.SuccessPolicy, &out.SuccessPolicy
*out = new(SuccessPolicy)
(*in).DeepCopyInto(*out)
}
if in.ActiveDeadlineSeconds != nil {
in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds
*out = new(int64)
@@ -486,6 +491,55 @@ func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicy) DeepCopyInto(out *SuccessPolicy) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]SuccessPolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicy.
func (in *SuccessPolicy) DeepCopy() *SuccessPolicy {
if in == nil {
return nil
}
out := new(SuccessPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicyRule) DeepCopyInto(out *SuccessPolicyRule) {
*out = *in
if in.SucceededIndexes != nil {
in, out := &in.SucceededIndexes, &out.SucceededIndexes
*out = new(string)
**out = **in
}
if in.SucceededCount != nil {
in, out := &in.SucceededCount, &out.SucceededCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicyRule.
func (in *SuccessPolicyRule) DeepCopy() *SuccessPolicyRule {
if in == nil {
return nil
}
out := new(SuccessPolicyRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) {
*out = *in

View File

@@ -838,8 +838,11 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
var manageJobErr error
exceedsBackoffLimit := jobCtx.failed > *job.Spec.BackoffLimit
jobCtx.finishedCondition = hasSuccessCriteriaMetCondition(&job)
// Given that the Job already has the SuccessCriteriaMet condition, the termination condition was already confirmed in another cycle.
// So, the job-controller evaluates the podFailurePolicy only when the Job doesn't have the SuccessCriteriaMet condition.
if jobCtx.finishedCondition == nil && feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) {
if failureTargetCondition := findConditionByType(job.Status.Conditions, batch.JobFailureTarget); failureTargetCondition != nil {
jobCtx.finishedCondition = newFailedConditionForFailureTarget(failureTargetCondition, jm.clock.Now())
} else if failJobMessage := getFailJobMessage(&job, pods); failJobMessage != nil {
@@ -875,6 +878,11 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
}
jobCtx.podsWithDelayedDeletionPerIndex = getPodsWithDelayedDeletionPerIndex(logger, jobCtx)
}
if jobCtx.finishedCondition == nil && hasSuccessCriteriaMetCondition(jobCtx.job) == nil {
if msg, met := matchSuccessPolicy(logger, job.Spec.SuccessPolicy, *job.Spec.Completions, jobCtx.succeededIndexes); met {
jobCtx.finishedCondition = newCondition(batch.JobSuccessCriteriaMet, v1.ConditionTrue, batch.JobReasonSuccessPolicy, msg, jm.clock.Now())
}
}
}
suspendCondChanged := false
// Remove active pods if Job failed.
@@ -1089,8 +1097,8 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
needsFlush = true
uncountedStatus.Succeeded = append(uncountedStatus.Succeeded, pod.UID)
}
} else if considerPodFailed || (jobCtx.finishedCondition != nil && !isSuccessCriteriaMetCondition(jobCtx.finishedCondition)) {
// When the job is considered finished, every non-terminated pod is considered failed.
ix := getCompletionIndex(pod.Annotations)
if !jobCtx.uncounted.failed.Has(string(pod.UID)) && (!isIndexed || (ix != unknownCompletionIndex && ix < int(*jobCtx.job.Spec.Completions))) {
if feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) && jobCtx.job.Spec.PodFailurePolicy != nil {
@@ -1150,6 +1158,17 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
jobCtx.finishedCondition = newFailedConditionForFailureTarget(jobCtx.finishedCondition, jm.clock.Now())
}
}
if isSuccessCriteriaMetCondition(jobCtx.finishedCondition) {
// Append the interim SuccessCriteriaMet condition to update the job status with before finalizers are removed.
if hasSuccessCriteriaMetCondition(jobCtx.job) == nil {
jobCtx.job.Status.Conditions = append(jobCtx.job.Status.Conditions, *jobCtx.finishedCondition)
needsFlush = true
}
// Prepare the final Complete condition to update the job status with after the finalizers are removed.
// It is also used in the enactJobFinished function for reporting.
jobCtx.finishedCondition = newCondition(batch.JobComplete, v1.ConditionTrue, jobCtx.finishedCondition.Reason, jobCtx.finishedCondition.Message, jm.clock.Now())
}
var err error
if jobCtx.job, needsFlush, err = jm.flushUncountedAndRemoveFinalizers(ctx, jobCtx, podsToRemoveFinalizer, uidsWithFinalizer, &oldCounters, podFailureCountByPolicyAction, needsFlush); err != nil {
return err
@@ -1177,7 +1196,8 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
// - the Pod is considered failed, unless its removal is delayed for the
// purpose of transferring the JobIndexFailureCount annotations to the
// replacement pod. If the entire Job is terminating, the finalizer can be
// removed unconditionally; or
// - the Job met the successPolicy.
func canRemoveFinalizer(logger klog.Logger, jobCtx *syncJobCtx, pod *v1.Pod, considerPodFailed bool) bool {
if jobCtx.job.DeletionTimestamp != nil || jobCtx.finishedCondition != nil || pod.Status.Phase == v1.PodSucceeded {
return true
@@ -1197,8 +1217,8 @@ func canRemoveFinalizer(logger klog.Logger, jobCtx *syncJobCtx, pod *v1.Pod, con
}
// flushUncountedAndRemoveFinalizers does:
// 1. flush the Job status that might include new uncounted Pod UIDs.
// Also flush the interim FailureTarget and SuccessCriteriaMet conditions if present.
// 2. perform the removal of finalizers from Pods which are in the uncounted
// lists.
// 3. update the counters based on the Pods for which it successfully removed
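
From a client's perspective, the flow above means a Job finished via a success policy carries two conditions: an interim SuccessCriteriaMet=True, flushed while pod finalizers are still being removed, followed by a terminal Complete=True with the same reason and message. A hypothetical watcher-side helper, assuming the public batch/v1 condition type that accompanies this change:

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// declaredSucceededEarly reports whether the Job carries the interim
// SuccessCriteriaMet=True condition that precedes Complete=True.
func declaredSucceededEarly(job *batchv1.Job) bool {
	for _, c := range job.Status.Conditions {
		if c.Type == batchv1.JobSuccessCriteriaMet && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}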

View File

@@ -1274,7 +1274,8 @@ func TestGetNewFinshedPods(t *testing.T) {
func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
logger, ctx := ktesting.NewTestContext(t)
completedCond := newCondition(batch.JobComplete, v1.ConditionTrue, "", "", realClock.Now())
succeededCond := newCondition(batch.JobSuccessCriteriaMet, v1.ConditionTrue, "", "", realClock.Now())
failedCond := newCondition(batch.JobFailed, v1.ConditionTrue, "", "", realClock.Now())
indexedCompletion := batch.IndexedCompletion
mockErr := errors.New("mock error")
@@ -1294,6 +1295,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
// features
enableJobBackoffLimitPerIndex bool
enableJobSuccessPolicy bool
}{
"no updates": {},
"new active": {
@@ -1420,12 +1422,40 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
wantSucceededPodsMetric: 3,
wantFailedPodsMetric: 3,
},
"succeeding job by JobSuccessPolicy": {
pods: []*v1.Pod{
buildPod().uid("a").phase(v1.PodSucceeded).trackingFinalizer().Pod,
buildPod().uid("b").phase(v1.PodFailed).trackingFinalizer().Pod,
buildPod().uid("c").phase(v1.PodPending).trackingFinalizer().Pod,
},
finishedCond: succeededCond,
wantRmFinalizers: 3,
wantStatusUpdates: []batch.JobStatus{
{
UncountedTerminatedPods: &batch.UncountedTerminatedPods{
Succeeded: []types.UID{"a"},
Failed: []types.UID{"b"},
},
Conditions: []batch.JobCondition{*succeededCond},
},
{
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Succeeded: 1,
Failed: 1,
Conditions: []batch.JobCondition{*succeededCond, *completedCond},
CompletionTime: &succeededCond.LastTransitionTime,
},
},
wantSucceededPodsMetric: 1,
wantFailedPodsMetric: 1,
enableJobSuccessPolicy: true,
},
"completing job": {
pods: []*v1.Pod{
buildPod().uid("a").phase(v1.PodSucceeded).trackingFinalizer().Pod,
buildPod().uid("b").phase(v1.PodFailed).trackingFinalizer().Pod,
},
finishedCond: completedCond,
wantRmFinalizers: 2,
wantStatusUpdates: []batch.JobStatus{
{
@@ -1438,8 +1468,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Succeeded: 1,
Failed: 1,
Conditions: []batch.JobCondition{*completedCond},
CompletionTime: &completedCond.LastTransitionTime,
},
},
wantSucceededPodsMetric: 1,
@@ -1900,6 +1930,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)()
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)()
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
manager, _ := newControllerFromClient(ctx, t, clientSet, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{Err: tc.podControlErr}
@@ -1934,9 +1966,24 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
if !errors.Is(err, tc.wantErr) {
t.Errorf("Got error %v, want %v", err, tc.wantErr)
}
cmpOpts := []cmp.Option{cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")}
if tc.finishedCond != nil && tc.finishedCond.Type == batch.JobSuccessCriteriaMet {
cmpOpts = append(cmpOpts, cmpopts.IgnoreFields(batch.JobStatus{}, "CompletionTime"))
}
if diff := cmp.Diff(tc.wantStatusUpdates, statusUpdates, cmpOpts...); diff != "" {
t.Errorf("Unexpected status updates (-want,+got):\n%s", diff) t.Errorf("Unexpected status updates (-want,+got):\n%s", diff)
} }
// If finishedCond is set to the SuccessCriteriaMet condition, the job controller adds the Complete condition to the Job while reconciling,
// and the LastTransitionTime of that added Complete condition is used as the CompletionTime.
// So we verify that the CompletionTime is after the SuccessCriteriaMet LastTransitionTime.
if tc.finishedCond != nil && tc.finishedCond.Type == batch.JobSuccessCriteriaMet && len(tc.wantStatusUpdates) != 0 {
for i := range tc.wantStatusUpdates {
if tc.wantStatusUpdates[i].CompletionTime != nil && !tc.wantStatusUpdates[i].CompletionTime.Before(statusUpdates[i].CompletionTime) {
t.Errorf("Unexpected completionTime; completionTime %v must be after %v",
tc.wantStatusUpdates[i].CompletionTime, statusUpdates[i].CompletionTime)
}
}
}
rmFinalizers := len(fakePodControl.Patches)
if rmFinalizers != tc.wantRmFinalizers {
t.Errorf("Removed %d finalizers, want %d", rmFinalizers, tc.wantRmFinalizers)
@@ -3540,6 +3587,788 @@ func TestSyncJobWithJobPodFailurePolicy(t *testing.T) {
}
}
func TestSyncJobWithJobSuccessPolicy(t *testing.T) {
now := time.Now()
validTypeMeta := metav1.TypeMeta{
APIVersion: batch.SchemeGroupVersion.String(),
Kind: "Job",
}
validObjectMeta := metav1.ObjectMeta{
Name: "foobar",
UID: uuid.NewUUID(),
Namespace: metav1.NamespaceDefault,
}
validSelector := &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
}
validTemplate := v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foobar"},
},
},
}
testCases := map[string]struct {
enableJobFailurePolicy bool
enableBackoffLimitPerIndex bool
enableJobSuccessPolicy bool
job batch.Job
pods []v1.Pod
wantStatus batch.JobStatus
}{
"job with successPolicy; job has SuccessCriteriaMet condition if job meets to successPolicy and some indexes fail": {
enableJobSuccessPolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](3),
Completions: ptr.To[int32](3),
BackoffLimit: ptr.To[int32](math.MaxInt32),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("a2").index("0").phase(v1.PodRunning).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
*buildPod().uid("c").index("2").phase(v1.PodRunning).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with podFailurePolicy and successPolicy; job has SuccessCriteriaMet condition if job meets to successPolicy and doesn't meet to podFailurePolicy": {
enableJobSuccessPolicy: true,
enableJobFailurePolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
PodFailurePolicy: &batch.PodFailurePolicy{
Rules: []batch.PodFailurePolicyRule{{
Action: batch.PodFailurePolicyActionFailJob,
OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("a2").index("0").phase(v1.PodRunning).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with backoffLimitPerIndex and successPolicy; job has SuccessCriteriaMet condition if job meets to successPolicy and doesn't meet backoffLimitPerIndex": {
enableJobSuccessPolicy: true,
enableBackoffLimitPerIndex: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
BackoffLimitPerIndex: ptr.To[int32](2),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("a2").index("0").phase(v1.PodRunning).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
FailedIndexes: ptr.To(""),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with successPolicy; job has both Complete and SuccessCriteriaMet condition when job meets to successPolicy and all pods have been already removed": {
enableJobSuccessPolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
BackoffLimitPerIndex: ptr.To[int32](2),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
// In the current mechanism, the job controller adds the Complete condition to the Job
// even if some pods are still running.
// So, we need to revisit this before graduating JobSuccessPolicy to beta.
// TODO(#123775): A Job might finish with ready!=0
// REF: https://github.com/kubernetes/kubernetes/issues/123775
"job with successPolicy; job has SuccessCriteriaMet and Complete condition when job meets to successPolicy and some pods still are running": {
enableJobSuccessPolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](3),
Completions: ptr.To[int32](3),
BackoffLimit: ptr.To[int32](math.MaxInt32),
BackoffLimitPerIndex: ptr.To[int32](3),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("a2").index("1").phase(v1.PodRunning).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
*buildPod().uid("c").index("2").phase(v1.PodRunning).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with successPolicy and podFailurePolicy; job has a failed condition when job meets to both successPolicy and podFailurePolicy": {
enableJobSuccessPolicy: true,
enableJobFailurePolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
PodFailurePolicy: &batch.PodFailurePolicy{
Rules: []batch.PodFailurePolicyRule{{
Action: batch.PodFailurePolicyActionFailJob,
OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a1").index("0").status(v1.PodStatus{
Phase: v1.PodFailed,
Conditions: []v1.PodCondition{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}).trackingFinalizer().Pod,
*buildPod().uid("a2").index("0").phase(v1.PodRunning).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 2,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobFailureTarget,
Status: v1.ConditionTrue,
Reason: batch.JobReasonPodFailurePolicy,
Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0",
},
{
Type: batch.JobFailed,
Status: v1.ConditionTrue,
Reason: batch.JobReasonPodFailurePolicy,
Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0",
},
},
},
},
"job with successPolicy and backoffLimitPerIndex; job has a failed condition when job meets to both successPolicy and backoffLimitPerIndex": {
enableJobSuccessPolicy: true,
enableBackoffLimitPerIndex: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
BackoffLimitPerIndex: ptr.To[int32](1),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a").index("0").phase(v1.PodFailed).indexFailureCount("1").trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
FailedIndexes: ptr.To("0"),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobFailed,
Status: v1.ConditionTrue,
Reason: batch.JobReasonFailedIndexes,
Message: "Job has failed indexes",
},
},
},
},
"job with successPolicy and backoffLimit; job has a failed condition when job meets to both successPolicy and backoffLimit": {
enableJobSuccessPolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](1),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a1").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("a2").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 2,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobFailed,
Status: v1.ConditionTrue,
Reason: batch.JobReasonBackoffLimitExceeded,
Message: "Job has reached the specified backoff limit",
},
},
},
},
"job with successPolicy and podFailurePolicy; job with SuccessCriteriaMet has never been transitioned to FailureTarget and Failed even if job meets podFailurePolicy": {
enableJobSuccessPolicy: true,
enableJobFailurePolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](1),
}},
},
PodFailurePolicy: &batch.PodFailurePolicy{
Rules: []batch.PodFailurePolicyRule{{
Action: batch.PodFailurePolicyActionFailJob,
OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}},
},
},
Status: batch.JobStatus{
Failed: 0,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a").index("0").status(v1.PodStatus{
Phase: v1.PodFailed,
Conditions: []v1.PodCondition{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with successPolicy and backoffLimitPerIndex; job with SuccessCriteriaMet has never been transitioned to FailureTarget and Failed even if job meet backoffLimitPerIndex": {
enableJobSuccessPolicy: true,
enableBackoffLimitPerIndex: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
BackoffLimitPerIndex: ptr.To[int32](1),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("1"),
}},
},
},
Status: batch.JobStatus{
Failed: 0,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a").index("0").phase(v1.PodFailed).indexFailureCount("1").trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
FailedIndexes: ptr.To("0"),
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with successPolicy and backoffLimit: job with SuccessCriteriaMet has never been transitioned to FailureTarget and Failed even if job meets backoffLimit": {
enableJobSuccessPolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](1),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
},
Status: batch.JobStatus{
Failed: 0,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a").index("0").phase(v1.PodFailed).trackingFinalizer().Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
{
Type: batch.JobComplete,
Status: v1.ConditionTrue,
Reason: batch.JobReasonSuccessPolicy,
Message: "Matched rules at index 0",
},
},
},
},
"job with successPolicy and podFailureTarget; job with FailureTarget has never been transitioned to SuccessCriteriaMet even if job meets successPolicy": {
enableJobSuccessPolicy: true,
enableJobFailurePolicy: true,
job: batch.Job{
TypeMeta: validTypeMeta,
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
Selector: validSelector,
Template: validTemplate,
CompletionMode: completionModePtr(batch.IndexedCompletion),
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
BackoffLimit: ptr.To[int32](math.MaxInt32),
SuccessPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0,1"),
SucceededCount: ptr.To[int32](1),
}},
},
PodFailurePolicy: &batch.PodFailurePolicy{
Rules: []batch.PodFailurePolicyRule{{
Action: batch.PodFailurePolicyActionFailJob,
OnPodConditions: []batch.PodFailurePolicyOnPodConditionsPattern{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}},
},
},
Status: batch.JobStatus{
Failed: 1,
Succeeded: 0,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobFailureTarget,
Status: v1.ConditionTrue,
Reason: batch.JobReasonPodFailurePolicy,
Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0",
},
},
},
},
pods: []v1.Pod{
*buildPod().uid("a").index("0").status(v1.PodStatus{
Phase: v1.PodFailed,
Conditions: []v1.PodCondition{{
Type: v1.DisruptionTarget,
Status: v1.ConditionTrue,
}},
}).Pod,
*buildPod().uid("b").index("1").phase(v1.PodSucceeded).trackingFinalizer().Pod,
},
wantStatus: batch.JobStatus{
Failed: 1,
Succeeded: 1,
Terminating: ptr.To[int32](0),
CompletedIndexes: "1",
UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
Conditions: []batch.JobCondition{
{
Type: batch.JobFailureTarget,
Status: v1.ConditionTrue,
Reason: batch.JobReasonPodFailurePolicy,
Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0",
},
{
Type: batch.JobFailed,
Status: v1.ConditionTrue,
Reason: batch.JobReasonPodFailurePolicy,
Message: "Pod default/mypod-0 has condition DisruptionTarget matching FailJob rule at index 0",
},
},
},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobFailurePolicy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableBackoffLimitPerIndex)()
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)()
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
fakeClock := clocktesting.NewFakeClock(now)
_, ctx := ktesting.NewTestContext(t)
manager, sharedInformerFactory := newControllerFromClientWithClock(ctx, t, clientSet, controller.NoResyncPeriodFunc, fakeClock)
manager.podControl = &controller.FakePodControl{}
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
job := &tc.job
actual := job
manager.updateStatusHandler = func(_ context.Context, j *batch.Job) (*batch.Job, error) {
actual = j
return j, nil
}
if err := sharedInformerFactory.Batch().V1().Jobs().Informer().GetIndexer().Add(job); err != nil {
t.Fatalf("Failed to add the Job %q to sharedInformer: %v", klog.KObj(job), err)
}
for i, pod := range tc.pods {
pb := podBuilder{Pod: pod.DeepCopy()}.name(fmt.Sprintf("mypod-%d", i)).job(job)
if isIndexedJob(job) {
pb.index(strconv.Itoa(getCompletionIndex(pod.Annotations)))
}
if err := sharedInformerFactory.Core().V1().Pods().Informer().GetIndexer().Add(pb.Pod); err != nil {
t.Fatalf("Failed to add the Pod %q to sharedInformer: %v", klog.KObj(pb.Pod), err)
}
}
if err := manager.syncJob(ctx, testutil.GetKey(job, t)); err != nil {
t.Fatalf("Failed to complete syncJob: %v", err)
}
if diff := cmp.Diff(tc.wantStatus, actual.Status,
cmpopts.IgnoreFields(batch.JobStatus{}, "StartTime", "CompletionTime", "Ready"),
cmpopts.IgnoreFields(batch.JobCondition{}, "LastProbeTime", "LastTransitionTime")); diff != "" {
t.Errorf("Unexpectd Job status (-want,+got):\n%s", diff)
}
})
}
}
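Editor's note on the table above: every case that succeeds ends with the SuccessCriteriaMet/Complete pair (with matching reason and message), while the failure-policy cases end with FailureTarget/Failed, and the two interim conditions never convert into each other. A hedged sketch of how a client watching Job status might consume that contract; it assumes a k8s.io/api version that includes the JobSuccessCriteriaMet constant added by this PR:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// hasTrueCondition reports whether the Job carries the given condition with status True.
func hasTrueCondition(job *batchv1.Job, t batchv1.JobConditionType) bool {
	for _, c := range job.Status.Conditions {
		if c.Type == t && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	job := &batchv1.Job{Status: batchv1.JobStatus{Conditions: []batchv1.JobCondition{
		{Type: batchv1.JobSuccessCriteriaMet, Status: corev1.ConditionTrue},
	}}}
	switch {
	case hasTrueCondition(job, batchv1.JobComplete):
		fmt.Println("terminal: Complete")
	case hasTrueCondition(job, batchv1.JobSuccessCriteriaMet):
		fmt.Println("success declared; lingering pods are being terminated")
	default:
		fmt.Println("still running")
	}
}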
func TestSyncJobWithJobBackoffLimitPerIndex(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
now := time.Now()

View File

@@ -0,0 +1,87 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"fmt"
batch "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
)
func matchSuccessPolicy(logger klog.Logger, successPolicy *batch.SuccessPolicy, completions int32, succeededIndexes orderedIntervals) (string, bool) {
if successPolicy == nil || len(succeededIndexes) == 0 {
return "", false
}
rulesMatchedMsg := "Matched rules at index"
for index, rule := range successPolicy.Rules {
if rule.SucceededIndexes != nil {
requiredIndexes := parseIndexesFromString(logger, *rule.SucceededIndexes, int(completions))
// Skip the rule if its succeededIndexes couldn't be parsed (e.g., invalid format).
if len(requiredIndexes) == 0 {
continue
}
if matchSucceededIndexesRule(requiredIndexes, succeededIndexes, rule.SucceededCount) {
return fmt.Sprintf("%s %d", rulesMatchedMsg, index), true
}
} else if rule.SucceededCount != nil && succeededIndexes.total() >= int(*rule.SucceededCount) {
return fmt.Sprintf("%s %d", rulesMatchedMsg, index), true
}
}
return "", false
}
func hasSuccessCriteriaMetCondition(job *batch.Job) *batch.JobCondition {
if feature.DefaultFeatureGate.Enabled(features.JobSuccessPolicy) {
successCriteriaMet := findConditionByType(job.Status.Conditions, batch.JobSuccessCriteriaMet)
if successCriteriaMet != nil && successCriteriaMet.Status == v1.ConditionTrue {
return successCriteriaMet
}
}
return nil
}
func isSuccessCriteriaMetCondition(cond *batch.JobCondition) bool {
return feature.DefaultFeatureGate.Enabled(features.JobSuccessPolicy) &&
cond != nil && cond.Type == batch.JobSuccessCriteriaMet && cond.Status == v1.ConditionTrue
}
func matchSucceededIndexesRule(ruleIndexes, succeededIndexes orderedIntervals, succeededCount *int32) bool {
var contains, succeededPointer, rulePointer int
for rulePointer < len(ruleIndexes) && succeededPointer < len(succeededIndexes) {
if overlap := min(ruleIndexes[rulePointer].Last, succeededIndexes[succeededPointer].Last) -
max(ruleIndexes[rulePointer].First, succeededIndexes[succeededPointer].First) + 1; overlap > 0 {
contains += overlap
}
if succeededIndexes[succeededPointer].Last < ruleIndexes[rulePointer].Last {
// The current succeeded interval is behind, so we can move to the next.
succeededPointer++
} else if succeededIndexes[succeededPointer].Last > ruleIndexes[rulePointer].Last {
// The current rule interval is behind, so we can move to the next.
rulePointer++
} else {
// Both intervals end at the same position, we can move to the next succeeded, and next rule.
succeededPointer++
rulePointer++
}
}
return contains == ruleIndexes.total() || (succeededCount != nil && contains >= int(*succeededCount))
}
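Editor's note: matchSucceededIndexesRule above counts the overlap of two sorted, disjoint interval lists in one two-pointer pass, then declares a match if the rule's indexes are fully covered or, when succeededCount is set, if the overlap reaches that count. A self-contained sketch of the same walk (local types; the built-in min/max used here and above need Go 1.21+), reproducing the "rule 2-4,6-9 vs succeeded 1-3,5-7" shape from the tests below, where the overlaps 2-3 and 6-7 give a count of 4:

package main

import "fmt"

type interval struct{ first, last int }

// countOverlap mirrors the two-pointer walk above: both lists are sorted and
// non-overlapping, so each step advances the list whose current interval ends first.
func countOverlap(rule, succeeded []interval) int {
	contains, r, s := 0, 0, 0
	for r < len(rule) && s < len(succeeded) {
		if o := min(rule[r].last, succeeded[s].last) - max(rule[r].first, succeeded[s].first) + 1; o > 0 {
			contains += o
		}
		switch {
		case succeeded[s].last < rule[r].last:
			s++ // the succeeded interval ends first; advance it
		case succeeded[s].last > rule[r].last:
			r++ // the rule interval ends first; advance it
		default:
			s++ // both end together; advance both
			r++
		}
	}
	return contains
}

func main() {
	fmt.Println(countOverlap(
		[]interval{{2, 4}, {6, 9}}, // rule "2-4,6-9", 7 indexes in total
		[]interval{{1, 3}, {5, 7}}, // succeeded indexes
	)) // prints 4: full coverage (7) is not met, but a succeededCount of 4 would be
}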

View File

@@ -0,0 +1,370 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package job
import (
"testing"
"github.com/google/go-cmp/cmp"
batch "k8s.io/api/batch/v1"
"k8s.io/klog/v2/ktesting"
"k8s.io/utils/ptr"
)
func TestMatchSuccessPolicy(t *testing.T) {
testCases := map[string]struct {
successPolicy *batch.SuccessPolicy
completions int32
succeededIndexes orderedIntervals
wantMessage string
wantMetSuccessPolicy bool
}{
"successPolicy is null": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 0}},
},
"any rules are nothing": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 0}},
successPolicy: &batch.SuccessPolicy{Rules: []batch.SuccessPolicyRule{}},
},
"rules.succeededIndexes is invalid format": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 0}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("invalid-form"),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes matched rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}, {4, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-2"),
}},
},
wantMessage: "Matched rules at index 0",
wantMetSuccessPolicy: true,
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("3"),
}},
},
},
"rules.succeededCount is specified; succeededIndexes matched rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](2),
}},
},
wantMessage: "Matched rules at index 0",
wantMetSuccessPolicy: true,
},
"rules.succeededCount is specified; succeededIndexes didn't match rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](4),
}},
},
},
"multiple rules; rules.succeededIndexes is specified; succeededIndexes met one of rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}, {4, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{
{
SucceededIndexes: ptr.To("9"),
},
{
SucceededIndexes: ptr.To("4,6"),
},
},
},
wantMessage: "Matched rules at index 1",
wantMetSuccessPolicy: true,
},
"multiple rules; rules.succeededIndexes is specified; succeededIndexes met all rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}, {4, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{
{
SucceededIndexes: ptr.To("0,1"),
},
{
SucceededIndexes: ptr.To("5"),
},
},
},
wantMessage: "Matched rules at index 0",
wantMetSuccessPolicy: true,
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes met all rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}, {4, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("3-6"),
SucceededCount: ptr.To[int32](2),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes didn't match rules": {
completions: 10,
succeededIndexes: orderedIntervals{{0, 2}, {6, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("3-6"),
SucceededCount: ptr.To[int32](2),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes are nothing": {
completions: 10,
succeededIndexes: orderedIntervals{},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](4),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes matched rules; rules is proper subset of succeededIndexes": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 5}, {6, 9}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("2-4,6-8"),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes matched rules; rules equals succeededIndexes": {
completions: 10,
succeededIndexes: orderedIntervals{{2, 4}, {6, 9}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("2-4,6-9"),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes matched rules; rules is subset of succeededIndexes": {
completions: 10,
succeededIndexes: orderedIntervals{{2, 5}, {7, 15}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("2-4,8-12"),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules is an empty set": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To(""),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; succeededIndexes is an empty set": {
completions: 10,
succeededIndexes: orderedIntervals{},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To(""),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules and succeededIndexes are empty set": {
completions: 10,
succeededIndexes: orderedIntervals{},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To(""),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; all elements of rules.succeededIndexes aren't included in succeededIndexes": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {5, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("10-12,14-16"),
}},
},
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules overlaps succeededIndexes at first": {
completions: 10,
succeededIndexes: orderedIntervals{{2, 4}, {6, 8}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("1-3,5-7"),
}},
},
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes matched rules; rules overlaps succeededIndexes at first": {
completions: 10,
succeededIndexes: orderedIntervals{{2, 4}, {6, 8}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("1-3,5-7"),
SucceededCount: ptr.To[int32](4),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules overlaps succeededIndexes at last": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {5, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("2-4,6-9"),
}},
},
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes matched rules; rules overlaps succeededIndexes at last": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {5, 7}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("2-4,6-9"),
SucceededCount: ptr.To[int32](4),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules completely overlaps succeededIndexes": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {7, 8}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-4,6-9"),
}},
},
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes matched rules; rules completely overlaps succeededIndexes": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {7, 8}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-4,6-9"),
SucceededCount: ptr.To[int32](5),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules overlaps multiple succeededIndexes at last": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {5, 9}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-6,8-9"),
}},
},
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes matched rules; rules overlaps multiple succeededIndexes at last": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 3}, {5, 9}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-6,8-9"),
SucceededCount: ptr.To[int32](7),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; rules overlaps succeededIndexes at first, and rules equals succeededIndexes at last": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 5}, {7, 10}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-5,7-9"),
}},
},
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes matched rules; rules overlaps succeededIndexes at first, and rules equals succeededIndexes at last": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 5}, {7, 10}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-5,7-9"),
SucceededCount: ptr.To[int32](8),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
"rules.succeededIndexes is specified; succeededIndexes didn't match rules; the first rules overlaps succeededIndexes at first": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 10}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-3,6-9"),
}},
},
},
"rules.succeededIndexes and rules.succeededCount are specified; succeededIndexes matched rules; the first rules overlaps succeededIndexes at first": {
completions: 10,
succeededIndexes: orderedIntervals{{1, 10}},
successPolicy: &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-3,6-9"),
SucceededCount: ptr.To[int32](7),
}},
},
wantMetSuccessPolicy: true,
wantMessage: "Matched rules at index 0",
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
logger := ktesting.NewLogger(t,
ktesting.NewConfig(
ktesting.BufferLogs(true),
),
)
gotMessage, gotMetSuccessPolicy := matchSuccessPolicy(logger, tc.successPolicy, tc.completions, tc.succeededIndexes)
if tc.wantMetSuccessPolicy != gotMetSuccessPolicy {
t.Errorf("Unexpected bool from matchSuccessPolicy\nwant:%v\ngot:%v\n", tc.wantMetSuccessPolicy, gotMetSuccessPolicy)
}
if diff := cmp.Diff(tc.wantMessage, gotMessage); diff != "" {
t.Errorf("Unexpected message from matchSuccessPolicy (-want,+got):\n%s", diff)
}
})
}
}
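Editor's note: the table above doubles as executable documentation for the matching semantics; assuming these files live in the job controller package, as the package clause suggests, the cases can be run in isolation with a command along the lines of go test ./pkg/controller/job -run TestMatchSuccessPolicy.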

View File

@@ -378,6 +378,15 @@ const (
// Allow users to specify recreating pods of a job only when
// pods have fully terminated.
JobPodReplacementPolicy featuregate.Feature = "JobPodReplacementPolicy"
// owner: @tenzen-y
// kep: https://kep.k8s.io/3998
// alpha: v1.30
//
// Allow users to specify when a Job can be declared as succeeded
// based on the set of succeeded pods.
JobSuccessPolicy featuregate.Feature = "JobSuccessPolicy"
// owner: @alculquicondor
// alpha: v1.23
// beta: v1.24
@@ -1077,6 +1086,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
JobPodReplacementPolicy: {Default: true, PreRelease: featuregate.Beta},
JobSuccessPolicy: {Default: false, PreRelease: featuregate.Alpha},
JobReadyPods: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.31
KubeletCgroupDriverFromCRI: {Default: false, PreRelease: featuregate.Alpha},
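Editor's note: with the alpha default of false, the new successPolicy field is silently dropped at admission (see the strategy changes below) unless the gate is switched on wherever the Job API and controller run; for a test cluster that typically means passing --feature-gates=JobSuccessPolicy=true to both kube-apiserver and kube-controller-manager, though the exact wiring depends on how the cluster is deployed.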

View File

@@ -349,6 +349,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"k8s.io/api/batch/v1.PodFailurePolicyOnExitCodesRequirement": schema_k8sio_api_batch_v1_PodFailurePolicyOnExitCodesRequirement(ref), "k8s.io/api/batch/v1.PodFailurePolicyOnExitCodesRequirement": schema_k8sio_api_batch_v1_PodFailurePolicyOnExitCodesRequirement(ref),
"k8s.io/api/batch/v1.PodFailurePolicyOnPodConditionsPattern": schema_k8sio_api_batch_v1_PodFailurePolicyOnPodConditionsPattern(ref), "k8s.io/api/batch/v1.PodFailurePolicyOnPodConditionsPattern": schema_k8sio_api_batch_v1_PodFailurePolicyOnPodConditionsPattern(ref),
"k8s.io/api/batch/v1.PodFailurePolicyRule": schema_k8sio_api_batch_v1_PodFailurePolicyRule(ref), "k8s.io/api/batch/v1.PodFailurePolicyRule": schema_k8sio_api_batch_v1_PodFailurePolicyRule(ref),
"k8s.io/api/batch/v1.SuccessPolicy": schema_k8sio_api_batch_v1_SuccessPolicy(ref),
"k8s.io/api/batch/v1.SuccessPolicyRule": schema_k8sio_api_batch_v1_SuccessPolicyRule(ref),
"k8s.io/api/batch/v1.UncountedTerminatedPods": schema_k8sio_api_batch_v1_UncountedTerminatedPods(ref), "k8s.io/api/batch/v1.UncountedTerminatedPods": schema_k8sio_api_batch_v1_UncountedTerminatedPods(ref),
"k8s.io/api/batch/v1beta1.CronJob": schema_k8sio_api_batch_v1beta1_CronJob(ref), "k8s.io/api/batch/v1beta1.CronJob": schema_k8sio_api_batch_v1beta1_CronJob(ref),
"k8s.io/api/batch/v1beta1.CronJobList": schema_k8sio_api_batch_v1beta1_CronJobList(ref), "k8s.io/api/batch/v1beta1.CronJobList": schema_k8sio_api_batch_v1beta1_CronJobList(ref),
@@ -17055,6 +17057,12 @@ func schema_k8sio_api_batch_v1_JobSpec(ref common.ReferenceCallback) common.Open
Ref: ref("k8s.io/api/batch/v1.PodFailurePolicy"), Ref: ref("k8s.io/api/batch/v1.PodFailurePolicy"),
}, },
}, },
"successPolicy": {
SchemaProps: spec.SchemaProps{
Description: "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default).",
Ref: ref("k8s.io/api/batch/v1.SuccessPolicy"),
},
},
"backoffLimit": { "backoffLimit": {
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Specifies the number of retries before marking this job failed. Defaults to 6", Description: "Specifies the number of retries before marking this job failed. Defaults to 6",
@@ -17138,7 +17146,7 @@ func schema_k8sio_api_batch_v1_JobSpec(ref common.ReferenceCallback) common.Open
},
},
Dependencies: []string{
"k8s.io/api/batch/v1.PodFailurePolicy", "k8s.io/api/batch/v1.SuccessPolicy", "k8s.io/api/core/v1.PodTemplateSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector"},
}
}
@@ -17439,6 +17447,68 @@ func schema_k8sio_api_batch_v1_PodFailurePolicyRule(ref common.ReferenceCallback
}
}
func schema_k8sio_api_batch_v1_SuccessPolicy(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"rules": {
VendorExtensible: spec.VendorExtensible{
Extensions: spec.Extensions{
"x-kubernetes-list-type": "atomic",
},
},
SchemaProps: spec.SchemaProps{
Description: "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/batch/v1.SuccessPolicyRule"),
},
},
},
},
},
},
Required: []string{"rules"},
},
},
Dependencies: []string{
"k8s.io/api/batch/v1.SuccessPolicyRule"},
}
}
func schema_k8sio_api_batch_v1_SuccessPolicyRule(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"succeededIndexes": {
SchemaProps: spec.SchemaProps{
Description: "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.",
Type: []string{"string"},
Format: "",
},
},
"succeededCount": {
SchemaProps: spec.SchemaProps{
Description: "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.",
Type: []string{"integer"},
Format: "int32",
},
},
},
},
},
}
}
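Editor's note: putting the two schemas above together, a hedged sketch of the new API surface from Go client code; the Job name, image, and rule values are illustrative placeholders, the types require a k8s.io/api release containing this PR, and the field only survives admission when the JobSuccessPolicy gate is enabled:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "leader-worker"},
		Spec: batchv1.JobSpec{
			Parallelism:    ptr.To[int32](2),
			Completions:    ptr.To[int32](2),
			CompletionMode: ptr.To(batchv1.IndexedCompletion), // successPolicy works only for Indexed Jobs
			// Declared succeeded as soon as one of the indexes 0 or 1 succeeds.
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{{
					SucceededIndexes: ptr.To("0-1"),
					SucceededCount:   ptr.To[int32](1),
				}},
			},
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers:    []corev1.Container{{Name: "main", Image: "busybox"}},
				},
			},
		},
	}
	fmt.Printf("%+v\n", job.Spec.SuccessPolicy.Rules[0])
}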
func schema_k8sio_api_batch_v1_UncountedTerminatedPods(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{

View File

@@ -104,6 +104,9 @@ func (jobStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
if !utilfeature.DefaultFeatureGate.Enabled(features.JobManagedBy) {
job.Spec.ManagedBy = nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.JobSuccessPolicy) {
job.Spec.SuccessPolicy = nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.JobBackoffLimitPerIndex) {
job.Spec.BackoffLimitPerIndex = nil
@@ -137,6 +140,9 @@ func (jobStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object
if !utilfeature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) && oldJob.Spec.PodFailurePolicy == nil {
newJob.Spec.PodFailurePolicy = nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.JobSuccessPolicy) && oldJob.Spec.SuccessPolicy == nil {
newJob.Spec.SuccessPolicy = nil
}
if !utilfeature.DefaultFeatureGate.Enabled(features.JobBackoffLimitPerIndex) {
if oldJob.Spec.BackoffLimitPerIndex == nil {

View File

@@ -41,7 +41,7 @@ import (
var ignoreErrValueDetail = cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail")
// TestJobStrategy_PrepareForUpdate tests various scenarios for PrepareForUpdate
func TestJobStrategy_PrepareForUpdate(t *testing.T) {
validSelector := getValidLabelSelector()
validPodTemplateSpec := getValidPodTemplateSpecForSelector(validSelector)
@@ -70,15 +70,140 @@ func TestJobStrategy_PrepareForUpdate(t *testing.T) {
},
},
}
successPolicy := &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{
{
SucceededIndexes: ptr.To("1,3-7"),
SucceededCount: ptr.To[int32](4),
},
},
}
updatedSuccessPolicy := &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{
{
SucceededIndexes: ptr.To("1,3-7"),
SucceededCount: ptr.To[int32](5),
},
},
}
cases := map[string]struct {
enableJobPodFailurePolicy bool
enableJobBackoffLimitPerIndex bool
enableJobPodReplacementPolicy bool
enableJobSuccessPolicy bool
job batch.Job
updatedJob batch.Job
wantJob batch.Job
}{
"update job with a new field; updated when JobSuccessPolicy enabled": {
enableJobSuccessPolicy: true,
job: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: nil,
},
},
updatedJob: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
wantJob: batch.Job{
ObjectMeta: getValidObjectMeta(1),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
},
"update pre-existing field; updated when JobSuccessPolicy enabled": {
enableJobSuccessPolicy: true,
job: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: successPolicy,
},
},
updatedJob: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
wantJob: batch.Job{
ObjectMeta: getValidObjectMeta(1),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
},
"update job with a new field: not update when JobSuccessPolicy disabled": {
enableJobSuccessPolicy: false,
job: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: nil,
},
},
updatedJob: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
wantJob: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: nil,
},
},
},
"update pre-existing field; updated when JobSuccessPolicy disabled": {
enableJobSuccessPolicy: false,
job: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: successPolicy,
},
},
updatedJob: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
wantJob: batch.Job{
ObjectMeta: getValidObjectMeta(1),
Spec: batch.JobSpec{
Selector: validSelector,
Template: validPodTemplateSpec,
SuccessPolicy: updatedSuccessPolicy,
},
},
},
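// Note: the disabled "pre-existing field" case above still applies the update,
// so a successPolicy already persisted in storage is never silently dropped.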
"update job with a new field; updated when JobBackoffLimitPerIndex enabled": { "update job with a new field; updated when JobBackoffLimitPerIndex enabled": {
enableJobBackoffLimitPerIndex: true, enableJobBackoffLimitPerIndex: true,
job: batch.Job{ job: batch.Job{
@@ -447,6 +572,7 @@ func TestJobStrategy_PrepareForUpdate(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodFailurePolicy, tc.enableJobPodFailurePolicy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)()
ctx := genericapirequest.NewDefaultContext()
Strategy.PrepareForUpdate(ctx, &tc.updatedJob, &tc.job)
@@ -477,12 +603,21 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) {
},
},
}
successPolicy := &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{
{
SucceededIndexes: ptr.To("1,3-7"),
SucceededCount: ptr.To[int32](4),
},
},
}
cases := map[string]struct {
enableJobPodFailurePolicy bool
enableJobBackoffLimitPerIndex bool
enableJobPodReplacementPolicy bool
enableJobManageBy bool
enableJobSuccessPolicy bool
job batch.Job
wantJob batch.Job
}{
@@ -504,6 +639,48 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) {
},
},
},
"create job with a new field; JobSuccessPolicy enabled": {
enableJobSuccessPolicy: true,
job: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
ManualSelector: ptr.To(false),
Template: validPodTemplateSpec,
SuccessPolicy: successPolicy,
},
},
wantJob: batch.Job{
ObjectMeta: getValidObjectMeta(1),
Spec: batch.JobSpec{
Selector: validSelector,
ManualSelector: ptr.To(false),
Template: expectedPodTemplateSpec,
SuccessPolicy: successPolicy,
},
},
},
"create job with a new field; JobSuccessPolicy disabled": {
enableJobSuccessPolicy: false,
job: batch.Job{
ObjectMeta: getValidObjectMeta(0),
Spec: batch.JobSpec{
Selector: validSelector,
ManualSelector: ptr.To(false),
Template: validPodTemplateSpec,
SuccessPolicy: successPolicy,
},
},
wantJob: batch.Job{
ObjectMeta: getValidObjectMeta(1),
Spec: batch.JobSpec{
Selector: validSelector,
ManualSelector: ptr.To(false),
Template: validPodTemplateSpec,
SuccessPolicy: nil,
},
},
},
"create job with a new fields; JobBackoffLimitPerIndex enabled": { "create job with a new fields; JobBackoffLimitPerIndex enabled": {
enableJobBackoffLimitPerIndex: true, enableJobBackoffLimitPerIndex: true,
job: batch.Job{ job: batch.Job{
@@ -803,6 +980,7 @@ func TestJobStrategy_PrepareForCreate(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableJobBackoffLimitPerIndex)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobPodReplacementPolicy, tc.enableJobPodReplacementPolicy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobManagedBy, tc.enableJobManageBy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)()
ctx := genericapirequest.NewDefaultContext()
Strategy.PrepareForCreate(ctx, &tc.job)
@@ -1909,11 +2087,17 @@ func TestStatusStrategy_ValidateUpdate(t *testing.T) {
Namespace: metav1.NamespaceDefault,
ResourceVersion: "10",
}
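// Minimal success policy shared by the JobSuccessPolicy status-validation cases below.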
validSuccessPolicy := &batch.SuccessPolicy{
Rules: []batch.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0-2"),
}},
}
now := metav1.Now()
nowPlusMinute := metav1.Time{Time: now.Add(time.Minute)}
cases := map[string]struct {
enableJobManagedBy bool
enableJobSuccessPolicy bool
job *batch.Job
newJob *batch.Job
@@ -2871,10 +3055,393 @@ func TestStatusStrategy_ValidateUpdate(t *testing.T) {
},
},
},
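// The cases below exercise the condition invariants enforced for the success
// policy: "SuccessCriteriaMet" requires an Indexed Job with a successPolicy,
// cannot coexist with "Failed" or "FailureTarget", must precede "Complete",
// and can never be flipped or removed once added.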
"invalid addition of SuccessCriteriaMet for NonIndexed Job": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
SuccessPolicy: validSuccessPolicy,
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
}},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid addition of SuccessCriteriaMet for Job with Failed": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobFailed,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobFailed,
Status: api.ConditionTrue,
},
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid addition of Failed for Job with SuccessCriteriaMet": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
{
Type: batch.JobFailed,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid addition of SuccessCriteriaMet for Job with FailureTarget": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobFailureTarget,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobFailureTarget,
Status: api.ConditionTrue,
},
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid addition of FailureTarget for Job with SuccessCriteriaMet": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
{
Type: batch.JobFailureTarget,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid addition of SuccessCriteriaMet for Job with Complete": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobComplete,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobComplete,
Status: api.ConditionTrue,
},
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"valid addition of Complete for Job with SuccessCriteriaMet": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
{
Type: batch.JobComplete,
Status: api.ConditionTrue,
},
},
},
},
},
"invalid addition of SuccessCriteriaMet for Job without SuccessPolicy": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid addition of Complete for Job with SuccessPolicy unless SuccessCriteriaMet": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{
{
Type: batch.JobComplete,
Status: api.ConditionTrue,
},
},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid disabling of SuccessCriteriaMet for Job": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobComplete,
Status: api.ConditionFalse,
}},
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
"invalid removing of SuccessCriteriaMet for Job": {
enableJobSuccessPolicy: true,
job: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobSuccessCriteriaMet,
Status: api.ConditionTrue,
}},
},
},
newJob: &batch.Job{
ObjectMeta: validObjectMeta,
Spec: batch.JobSpec{
CompletionMode: completionModePtr(batch.IndexedCompletion),
Completions: ptr.To[int32](10),
SuccessPolicy: validSuccessPolicy,
},
},
wantErrs: field.ErrorList{
{Type: field.ErrorTypeInvalid, Field: "status.conditions"},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobManagedBy, tc.enableJobManagedBy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)()
errs := StatusStrategy.ValidateUpdate(ctx, tc.newJob, tc.job)
if diff := cmp.Diff(tc.wantErrs, errs, ignoreErrValueDetail); diff != "" {
t.Errorf("Unexpected errors (-want,+got):\n%s", diff)

View File

@@ -444,10 +444,66 @@ func (m *PodFailurePolicyRule) XXX_DiscardUnknown() {
var xxx_messageInfo_PodFailurePolicyRule proto.InternalMessageInfo
func (m *SuccessPolicy) Reset() { *m = SuccessPolicy{} }
func (*SuccessPolicy) ProtoMessage() {}
func (*SuccessPolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_79228dc2c4001a22, []int{14}
}
func (m *SuccessPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SuccessPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SuccessPolicy) XXX_Merge(src proto.Message) {
xxx_messageInfo_SuccessPolicy.Merge(m, src)
}
func (m *SuccessPolicy) XXX_Size() int {
return m.Size()
}
func (m *SuccessPolicy) XXX_DiscardUnknown() {
xxx_messageInfo_SuccessPolicy.DiscardUnknown(m)
}
var xxx_messageInfo_SuccessPolicy proto.InternalMessageInfo
func (m *SuccessPolicyRule) Reset() { *m = SuccessPolicyRule{} }
func (*SuccessPolicyRule) ProtoMessage() {}
func (*SuccessPolicyRule) Descriptor() ([]byte, []int) {
return fileDescriptor_79228dc2c4001a22, []int{15}
}
func (m *SuccessPolicyRule) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *SuccessPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *SuccessPolicyRule) XXX_Merge(src proto.Message) {
xxx_messageInfo_SuccessPolicyRule.Merge(m, src)
}
func (m *SuccessPolicyRule) XXX_Size() int {
return m.Size()
}
func (m *SuccessPolicyRule) XXX_DiscardUnknown() {
xxx_messageInfo_SuccessPolicyRule.DiscardUnknown(m)
}
var xxx_messageInfo_SuccessPolicyRule proto.InternalMessageInfo
func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} }
func (*UncountedTerminatedPods) ProtoMessage() {}
func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) {
return fileDescriptor_79228dc2c4001a22, []int{16}
}
func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -487,6 +543,8 @@ func init() {
proto.RegisterType((*PodFailurePolicyOnExitCodesRequirement)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnExitCodesRequirement")
proto.RegisterType((*PodFailurePolicyOnPodConditionsPattern)(nil), "k8s.io.api.batch.v1.PodFailurePolicyOnPodConditionsPattern")
proto.RegisterType((*PodFailurePolicyRule)(nil), "k8s.io.api.batch.v1.PodFailurePolicyRule")
proto.RegisterType((*SuccessPolicy)(nil), "k8s.io.api.batch.v1.SuccessPolicy")
proto.RegisterType((*SuccessPolicyRule)(nil), "k8s.io.api.batch.v1.SuccessPolicyRule")
proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods")
}
@@ -495,120 +553,125 @@ func init() {
}
var fileDescriptor_79228dc2c4001a22 = []byte{
// 1882 bytes of a gzipped FileDescriptorProto
// (gzipped FileDescriptorProto bytes elided; regenerated output)
}
func (m *CronJob) Marshal() (dAtA []byte, err error) {
@@ -1030,6 +1093,20 @@ func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.SuccessPolicy != nil {
{
size, err := m.SuccessPolicy.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
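// Field 16 with wire type 2 encodes to the tag varint (16<<3)|2 = 130, i.e.
// the bytes 0x82 0x01 on the wire; the buffer is filled back to front, so
// the second byte is written first.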
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0x82
}
if m.ManagedBy != nil {
i -= len(*m.ManagedBy)
copy(dAtA[i:], *m.ManagedBy)
@@ -1456,6 +1533,78 @@ func (m *PodFailurePolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
func (m *SuccessPolicy) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SuccessPolicy) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *SuccessPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Rules) > 0 {
for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *SuccessPolicyRule) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SuccessPolicyRule) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
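// The single-byte tags below are 0x0a ((1<<3)|2, length-delimited) for
// succeededIndexes and 0x10 ((2<<3)|0, varint) for succeededCount.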
func (m *SuccessPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.SucceededCount != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.SucceededCount))
i--
dAtA[i] = 0x10
}
if m.SucceededIndexes != nil {
i -= len(*m.SucceededIndexes)
copy(dAtA[i:], *m.SucceededIndexes)
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SucceededIndexes)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -1702,6 +1851,10 @@ func (m *JobSpec) Size() (n int) {
l = len(*m.ManagedBy)
n += 1 + l + sovGenerated(uint64(l))
}
if m.SuccessPolicy != nil {
l = m.SuccessPolicy.Size()
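// two tag bytes (0x82 0x01) for field 16, plus the length varint and payload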
n += 2 + l + sovGenerated(uint64(l))
}
return n
}
@@ -1829,6 +1982,37 @@ func (m *PodFailurePolicyRule) Size() (n int) {
return n
}
func (m *SuccessPolicy) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Rules) > 0 {
for _, e := range m.Rules {
l = e.Size()
n += 1 + l + sovGenerated(uint64(l))
}
}
return n
}
func (m *SuccessPolicyRule) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.SucceededIndexes != nil {
l = len(*m.SucceededIndexes)
n += 1 + l + sovGenerated(uint64(l))
}
if m.SucceededCount != nil {
n += 1 + sovGenerated(uint64(*m.SucceededCount))
}
return n
}
func (m *UncountedTerminatedPods) Size() (n int) {
if m == nil {
return 0
@@ -1981,6 +2165,7 @@ func (this *JobSpec) String() string {
`MaxFailedIndexes:` + valueToStringGenerated(this.MaxFailedIndexes) + `,`,
`PodReplacementPolicy:` + valueToStringGenerated(this.PodReplacementPolicy) + `,`,
`ManagedBy:` + valueToStringGenerated(this.ManagedBy) + `,`,
`SuccessPolicy:` + strings.Replace(this.SuccessPolicy.String(), "SuccessPolicy", "SuccessPolicy", 1) + `,`,
`}`,
}, "")
return s
@@ -2076,6 +2261,32 @@ func (this *PodFailurePolicyRule) String() string {
}, "") }, "")
return s return s
} }
func (this *SuccessPolicy) String() string {
if this == nil {
return "nil"
}
repeatedStringForRules := "[]SuccessPolicyRule{"
for _, f := range this.Rules {
repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "SuccessPolicyRule", "SuccessPolicyRule", 1), `&`, ``, 1) + ","
}
repeatedStringForRules += "}"
s := strings.Join([]string{`&SuccessPolicy{`,
`Rules:` + repeatedStringForRules + `,`,
`}`,
}, "")
return s
}
func (this *SuccessPolicyRule) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&SuccessPolicyRule{`,
`SucceededIndexes:` + valueToStringGenerated(this.SucceededIndexes) + `,`,
`SucceededCount:` + valueToStringGenerated(this.SucceededCount) + `,`,
`}`,
}, "")
return s
}
func (this *UncountedTerminatedPods) String() string {
if this == nil {
return "nil"
@@ -3703,6 +3914,42 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error {
s := string(dAtA[iNdEx:postIndex])
m.ManagedBy = &s
iNdEx = postIndex
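// Field 16 carries the new successPolicy message (wire type 2).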
case 16:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SuccessPolicy", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SuccessPolicy == nil {
m.SuccessPolicy = &SuccessPolicy{}
}
if err := m.SuccessPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -4735,6 +4982,193 @@ func (m *PodFailurePolicyRule) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *SuccessPolicy) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SuccessPolicy: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SuccessPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Rules = append(m.Rules, SuccessPolicyRule{})
if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *SuccessPolicyRule) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SuccessPolicyRule: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SuccessPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SucceededIndexes", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(dAtA[iNdEx:postIndex])
m.SucceededIndexes = &s
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field SucceededCount", wireType)
}
var v int32
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int32(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.SucceededCount = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *UncountedTerminatedPods) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0

View File

@@ -218,6 +218,17 @@ message JobSpec {
// +optional
optional PodFailurePolicy podFailurePolicy = 11;
// successPolicy specifies the policy for when the Job can be declared as succeeded.
// If empty, the default behavior applies - the Job is declared as succeeded
// only when the number of succeeded pods equals the completions.
// When the field is specified, it must be immutable and works only for Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
// This field is alpha-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (disabled by default).
// +optional
optional SuccessPolicy successPolicy = 16;
// Specifies the number of retries before marking this job failed.
// Defaults to 6
// +optional
@@ -569,6 +580,51 @@ message PodFailurePolicyRule {
repeated PodFailurePolicyOnPodConditionsPattern onPodConditions = 3;
}
// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.
message SuccessPolicy {
// rules represents the list of alternative rules for declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
// +listType=atomic
repeated SuccessPolicyRule rules = 1;
}
// SuccessPolicyRule describes a rule for declaring a Job as succeeded.
// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
message SuccessPolicyRule {
// succeededIndexes specifies the set of indexes
// which need to be contained in the actual set of the succeeded indexes for the Job.
// The list of indexes must be within 0 to ".spec.completions-1" and
// must not contain duplicates. At least one element is required.
// The indexes are represented as intervals separated by commas.
// An interval can be a single decimal integer or a pair of decimal integers
// denoting the first and last elements of the range, separated by a hyphen.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// When this field is null, this field doesn't default to any value
// and is never evaluated at any time.
//
// +optional
optional string succeededIndexes = 1;
// succeededCount specifies the minimum required size of the actual set of the succeeded indexes
// for the Job. When succeededCount is used along with succeededIndexes, the check is
// constrained only to the set of indexes specified by succeededIndexes.
// For example, given that succeededIndexes is "1-4", succeededCount is "3",
// and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
// because only "1" and "3" indexes are considered in that rules.
// When this field is null, this doesn't default to any value and
// is never evaluated at any time.
// When specified, it needs to be a positive integer.
//
// +optional
optional int32 succeededCount = 2;
}
// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
// been accounted in Job status counters.
message UncountedTerminatedPods {

View File

@@ -255,6 +255,51 @@ type PodFailurePolicy struct {
Rules []PodFailurePolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"`
}
// SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.
type SuccessPolicy struct {
// rules represents the list of alternative rules for declaring the Jobs
// as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met,
// the "SuccessCriteriaMet" condition is added, and the lingering pods are removed.
// The terminal state for such a Job has the "Complete" condition.
// Additionally, these rules are evaluated in order; once the Job meets one of the rules,
// other rules are ignored. At most 20 elements are allowed.
// +listType=atomic
Rules []SuccessPolicyRule `json:"rules" protobuf:"bytes,1,opt,name=rules"`
}
// SuccessPolicyRule describes a rule for declaring a Job as succeeded.
// Each rule must have at least one of the "succeededIndexes" or "succeededCount" specified.
type SuccessPolicyRule struct {
// succeededIndexes specifies the set of indexes
// which need to be contained in the actual set of the succeeded indexes for the Job.
// The list of indexes must be within 0 to ".spec.completions-1" and
// must not contain duplicates. At least one element is required.
// The indexes are represented as intervals separated by commas.
// Each interval can be a single decimal integer or a pair of decimal integers
// separated by a hyphen; the pair denotes the first and last elements of the series.
// For example, if the completed indexes are 1, 3, 4, 5 and 7, they are
// represented as "1,3-5,7".
// When this field is null, it doesn't default to any value
// and is never evaluated.
//
// +optional
SucceededIndexes *string `json:"succeededIndexes,omitempty" protobuf:"bytes,1,opt,name=succeededIndexes"`
// succeededCount specifies the minimal required size of the actual set of the succeeded indexes
// for the Job. When succeededCount is used along with succeededIndexes, the check is
// constrained only to the set of indexes specified by succeededIndexes.
// For example, given that succeededIndexes is "1-4", succeededCount is "3",
// and completed indexes are "1", "3", and "5", the Job isn't declared as succeeded
// because only indexes "1" and "3" are considered by that rule.
// When this field is null, it doesn't default to any value and
// is never evaluated.
// When specified, it needs to be a positive integer.
//
// +optional
SucceededCount *int32 `json:"succeededCount,omitempty" protobuf:"varint,2,opt,name=succeededCount"`
}
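For reference, here is how the new types compose into a spec. This is an illustrative snippet in the style of the integration tests later in this diff (pod template elided); it declares the Job succeeded once any three of the five indexes succeed:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// Indexed Job with a single success rule: the Job is succeeded once
	// any three of the indexes 0-4 have succeeded. Values are illustrative.
	job := batchv1.Job{
		Spec: batchv1.JobSpec{
			Parallelism:    ptr.To[int32](5),
			Completions:    ptr.To[int32](5),
			CompletionMode: ptr.To(batchv1.IndexedCompletion),
			SuccessPolicy: &batchv1.SuccessPolicy{
				Rules: []batchv1.SuccessPolicyRule{{
					SucceededIndexes: ptr.To("0-4"),
					SucceededCount:   ptr.To[int32](3),
				}},
			},
		},
	}
	fmt.Println(*job.Spec.SuccessPolicy.Rules[0].SucceededIndexes)
}

Per the succeededCount comment above, combining both fields constrains the count to the listed indexes, so successes outside "0-4" would not contribute.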
// JobSpec describes how the job execution will look like. // JobSpec describes how the job execution will look like.
type JobSpec struct { type JobSpec struct {
@@ -296,6 +341,17 @@ type JobSpec struct {
// +optional // +optional
PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"` PodFailurePolicy *PodFailurePolicy `json:"podFailurePolicy,omitempty" protobuf:"bytes,11,opt,name=podFailurePolicy"`
// successPolicy specifies the policy for when the Job can be declared as succeeded.
// If empty, the default behavior applies - the Job is declared as succeeded
// only when the number of succeeded pods equals the completions.
// When the field is specified, it must be immutable and works only for Indexed Jobs.
// Once the Job meets the SuccessPolicy, the lingering pods are terminated.
//
// This field is alpha-level. To use this field, you must enable the
// `JobSuccessPolicy` feature gate (disabled by default).
// +optional
SuccessPolicy *SuccessPolicy `json:"successPolicy,omitempty" protobuf:"bytes,16,opt,name=successPolicy"`
// Specifies the number of retries before marking this job failed. // Specifies the number of retries before marking this job failed.
// Defaults to 6 // Defaults to 6
// +optional // +optional
@@ -572,6 +628,8 @@ const (
JobFailed JobConditionType = "Failed" JobFailed JobConditionType = "Failed"
// FailureTarget means the job is about to fail its execution. // FailureTarget means the job is about to fail its execution.
JobFailureTarget JobConditionType = "FailureTarget" JobFailureTarget JobConditionType = "FailureTarget"
// JobSuccessCriteriaMet means the Job has met its success criteria.
JobSuccessCriteriaMet JobConditionType = "SuccessCriteriaMet"
) )
const ( const (
@@ -591,6 +649,11 @@ const (
// JobReasonFailedIndexes means Job has failed indexes. // JobReasonFailedIndexes means Job has failed indexes.
// This const is used in beta-level feature: https://kep.k8s.io/3850. // This const is used in beta-level feature: https://kep.k8s.io/3850.
JobReasonFailedIndexes string = "FailedIndexes" JobReasonFailedIndexes string = "FailedIndexes"
// JobReasonSuccessPolicy reason indicates that a SuccessCriteriaMet condition is added due to
// the Job meeting its successPolicy.
// https://kep.k8s.io/3998
// This is currently an alpha-level feature.
JobReasonSuccessPolicy string = "SuccessPolicy"
) )
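Consumers that want to react to the new condition can scan the Job's conditions in the usual way; a small illustrative helper (ours, not part of this change):

package example

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
)

// hasCondition reports whether the Job carries the given condition with
// status True, e.g. batchv1.JobSuccessCriteriaMet.
func hasCondition(job *batchv1.Job, condType batchv1.JobConditionType) bool {
	for _, c := range job.Status.Conditions {
		if c.Type == condType && c.Status == corev1.ConditionTrue {
			return true
		}
	}
	return false
}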
// JobCondition describes current state of a job. // JobCondition describes current state of a job.

View File

@@ -116,6 +116,7 @@ var map_JobSpec = map[string]string{
"completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
"activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.",
"podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", "podFailurePolicy": "Specifies the policy of handling failed pods. In particular, it allows to specify the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the jobs's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).",
"successPolicy": "successPolicy specifies the policy when the Job can be declared as succeeded. If empty, the default behavior applies - the Job is declared as succeeded only when the number of succeeded pods equals to the completions. When the field is specified, it must be immutable and works only for the Indexed Jobs. Once the Job meets the SuccessPolicy, the lingering pods are terminated.\n\nThis field is alpha-level. To use this field, you must enable the `JobSuccessPolicy` feature gate (disabled by default).",
"backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6",
"backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "backoffLimitPerIndex": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
"maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", "maxFailedIndexes": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when is completions greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).",
@@ -203,6 +204,25 @@ func (PodFailurePolicyRule) SwaggerDoc() map[string]string {
return map_PodFailurePolicyRule return map_PodFailurePolicyRule
} }
var map_SuccessPolicy = map[string]string{
"": "SuccessPolicy describes when a Job can be declared as succeeded based on the success of some indexes.",
"rules": "rules represents the list of alternative rules for the declaring the Jobs as successful before `.status.succeeded >= .spec.completions`. Once any of the rules are met, the \"SucceededCriteriaMet\" condition is added, and the lingering pods are removed. The terminal state for such a Job has the \"Complete\" condition. Additionally, these rules are evaluated in order; Once the Job meets one of the rules, other rules are ignored. At most 20 elements are allowed.",
}
func (SuccessPolicy) SwaggerDoc() map[string]string {
return map_SuccessPolicy
}
var map_SuccessPolicyRule = map[string]string{
"": "SuccessPolicyRule describes rule for declaring a Job as succeeded. Each rule must have at least one of the \"succeededIndexes\" or \"succeededCount\" specified.",
"succeededIndexes": "succeededIndexes specifies the set of indexes which need to be contained in the actual set of the succeeded indexes for the Job. The list of indexes must be within 0 to \".spec.completions-1\" and must not contain duplicates. At least one element is required. The indexes are represented as intervals separated by commas. The intervals can be a decimal integer or a pair of decimal integers separated by a hyphen. The number are listed in represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". When this field is null, this field doesn't default to any value and is never evaluated at any time.",
"succeededCount": "succeededCount specifies the minimal required size of the actual set of the succeeded indexes for the Job. When succeededCount is used along with succeededIndexes, the check is constrained only to the set of indexes specified by succeededIndexes. For example, given that succeededIndexes is \"1-4\", succeededCount is \"3\", and completed indexes are \"1\", \"3\", and \"5\", the Job isn't declared as succeeded because only \"1\" and \"3\" indexes are considered in that rules. When this field is null, this doesn't default to any value and is never evaluated at any time. When specified it needs to be a positive integer.",
}
func (SuccessPolicyRule) SwaggerDoc() map[string]string {
return map_SuccessPolicyRule
}
var map_UncountedTerminatedPods = map[string]string{ var map_UncountedTerminatedPods = map[string]string{
"": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.", "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.",
"succeeded": "succeeded holds UIDs of succeeded Pods.", "succeeded": "succeeded holds UIDs of succeeded Pods.",

View File

@@ -262,6 +262,11 @@ func (in *JobSpec) DeepCopyInto(out *JobSpec) {
*out = new(PodFailurePolicy) *out = new(PodFailurePolicy)
(*in).DeepCopyInto(*out) (*in).DeepCopyInto(*out)
} }
if in.SuccessPolicy != nil {
in, out := &in.SuccessPolicy, &out.SuccessPolicy
*out = new(SuccessPolicy)
(*in).DeepCopyInto(*out)
}
if in.BackoffLimit != nil { if in.BackoffLimit != nil {
in, out := &in.BackoffLimit, &out.BackoffLimit in, out := &in.BackoffLimit, &out.BackoffLimit
*out = new(int32) *out = new(int32)
@@ -486,6 +491,55 @@ func (in *PodFailurePolicyRule) DeepCopy() *PodFailurePolicyRule {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicy) DeepCopyInto(out *SuccessPolicy) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]SuccessPolicyRule, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicy.
func (in *SuccessPolicy) DeepCopy() *SuccessPolicy {
if in == nil {
return nil
}
out := new(SuccessPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SuccessPolicyRule) DeepCopyInto(out *SuccessPolicyRule) {
*out = *in
if in.SucceededIndexes != nil {
in, out := &in.SucceededIndexes, &out.SucceededIndexes
*out = new(string)
**out = **in
}
if in.SucceededCount != nil {
in, out := &in.SucceededCount, &out.SucceededCount
*out = new(int32)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessPolicyRule.
func (in *SuccessPolicyRule) DeepCopy() *SuccessPolicyRule {
if in == nil {
return nil
}
out := new(SuccessPolicyRule)
in.DeepCopyInto(out)
return out
}
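As with the other generated helpers, DeepCopy duplicates the pointer-typed fields, so a copy can be mutated without aliasing the original; an illustrative sketch:

package example

import batchv1 "k8s.io/api/batch/v1"

// mutateCopy returns a modified copy of the policy; the write through the
// copied pointer does not affect orig, because DeepCopy allocated new
// storage for SucceededIndexes.
func mutateCopy(orig *batchv1.SuccessPolicy) *batchv1.SuccessPolicy {
	cp := orig.DeepCopy()
	if len(cp.Rules) > 0 && cp.Rules[0].SucceededIndexes != nil {
		*cp.Rules[0].SucceededIndexes = "0"
	}
	return cp
}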
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) { func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) {
*out = *in *out = *in

View File

@@ -116,6 +116,14 @@
} }
] ]
}, },
"successPolicy": {
"rules": [
{
"succeededIndexes": "succeededIndexesValue",
"succeededCount": 2
}
]
},
"backoffLimit": 7, "backoffLimit": 7,
"backoffLimitPerIndex": 12, "backoffLimitPerIndex": 12,
"maxFailedIndexes": 13, "maxFailedIndexes": 13,

View File

@@ -98,6 +98,10 @@ spec:
- valuesValue - valuesValue
matchLabels: matchLabels:
matchLabelsKey: matchLabelsValue matchLabelsKey: matchLabelsValue
successPolicy:
rules:
- succeededCount: 2
succeededIndexes: succeededIndexesValue
suspend: true suspend: true
template: template:
metadata: metadata:

View File

@@ -67,6 +67,14 @@
} }
] ]
}, },
"successPolicy": {
"rules": [
{
"succeededIndexes": "succeededIndexesValue",
"succeededCount": 2
}
]
},
"backoffLimit": 7, "backoffLimit": 7,
"backoffLimitPerIndex": 12, "backoffLimitPerIndex": 12,
"maxFailedIndexes": 13, "maxFailedIndexes": 13,

View File

@@ -62,6 +62,10 @@ spec:
- valuesValue - valuesValue
matchLabels: matchLabels:
matchLabelsKey: matchLabelsValue matchLabelsKey: matchLabelsValue
successPolicy:
rules:
- succeededCount: 2
succeededIndexes: succeededIndexesValue
suspend: true suspend: true
template: template:
metadata: metadata:

View File

@@ -116,6 +116,14 @@
} }
] ]
}, },
"successPolicy": {
"rules": [
{
"succeededIndexes": "succeededIndexesValue",
"succeededCount": 2
}
]
},
"backoffLimit": 7, "backoffLimit": 7,
"backoffLimitPerIndex": 12, "backoffLimitPerIndex": 12,
"maxFailedIndexes": 13, "maxFailedIndexes": 13,

View File

@@ -98,6 +98,10 @@ spec:
- valuesValue - valuesValue
matchLabels: matchLabels:
matchLabelsKey: matchLabelsValue matchLabelsKey: matchLabelsValue
successPolicy:
rules:
- succeededCount: 2
succeededIndexes: succeededIndexesValue
suspend: true suspend: true
template: template:
metadata: metadata:

View File

@@ -31,6 +31,7 @@ type JobSpecApplyConfiguration struct {
Completions *int32 `json:"completions,omitempty"` Completions *int32 `json:"completions,omitempty"`
ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"` ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty"`
PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"` PodFailurePolicy *PodFailurePolicyApplyConfiguration `json:"podFailurePolicy,omitempty"`
SuccessPolicy *SuccessPolicyApplyConfiguration `json:"successPolicy,omitempty"`
BackoffLimit *int32 `json:"backoffLimit,omitempty"` BackoffLimit *int32 `json:"backoffLimit,omitempty"`
BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"` BackoffLimitPerIndex *int32 `json:"backoffLimitPerIndex,omitempty"`
MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"` MaxFailedIndexes *int32 `json:"maxFailedIndexes,omitempty"`
@@ -82,6 +83,14 @@ func (b *JobSpecApplyConfiguration) WithPodFailurePolicy(value *PodFailurePolicy
return b return b
} }
// WithSuccessPolicy sets the SuccessPolicy field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SuccessPolicy field is set to the value of the last call.
func (b *JobSpecApplyConfiguration) WithSuccessPolicy(value *SuccessPolicyApplyConfiguration) *JobSpecApplyConfiguration {
b.SuccessPolicy = value
return b
}
// WithBackoffLimit sets the BackoffLimit field in the declarative configuration to the given value // WithBackoffLimit sets the BackoffLimit field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations. // and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BackoffLimit field is set to the value of the last call. // If called multiple times, the BackoffLimit field is set to the value of the last call.

View File

@@ -0,0 +1,44 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// SuccessPolicyApplyConfiguration represents a declarative configuration of the SuccessPolicy type for use
// with apply.
type SuccessPolicyApplyConfiguration struct {
Rules []SuccessPolicyRuleApplyConfiguration `json:"rules,omitempty"`
}
// SuccessPolicyApplyConfiguration constructs a declarative configuration of the SuccessPolicy type for use with
// apply.
func SuccessPolicy() *SuccessPolicyApplyConfiguration {
return &SuccessPolicyApplyConfiguration{}
}
// WithRules adds the given value to the Rules field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Rules field.
func (b *SuccessPolicyApplyConfiguration) WithRules(values ...*SuccessPolicyRuleApplyConfiguration) *SuccessPolicyApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithRules")
}
b.Rules = append(b.Rules, *values[i])
}
return b
}

View File

@@ -0,0 +1,48 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1
// SuccessPolicyRuleApplyConfiguration represents a declarative configuration of the SuccessPolicyRule type for use
// with apply.
type SuccessPolicyRuleApplyConfiguration struct {
SucceededIndexes *string `json:"succeededIndexes,omitempty"`
SucceededCount *int32 `json:"succeededCount,omitempty"`
}
// SuccessPolicyRuleApplyConfiguration constructs a declarative configuration of the SuccessPolicyRule type for use with
// apply.
func SuccessPolicyRule() *SuccessPolicyRuleApplyConfiguration {
return &SuccessPolicyRuleApplyConfiguration{}
}
// WithSucceededIndexes sets the SucceededIndexes field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SucceededIndexes field is set to the value of the last call.
func (b *SuccessPolicyRuleApplyConfiguration) WithSucceededIndexes(value string) *SuccessPolicyRuleApplyConfiguration {
b.SucceededIndexes = &value
return b
}
// WithSucceededCount sets the SucceededCount field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the SucceededCount field is set to the value of the last call.
func (b *SuccessPolicyRuleApplyConfiguration) WithSucceededCount(value int32) *SuccessPolicyRuleApplyConfiguration {
b.SucceededCount = &value
return b
}
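The generated builders compose in the usual apply-configuration style; an illustrative sketch, assuming the standard client-go import path for these generated packages:

package example

import (
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// successPolicySpec builds the successPolicy portion of a server-side
// apply request using the builders above; the values are illustrative.
func successPolicySpec() *batchv1ac.JobSpecApplyConfiguration {
	return batchv1ac.JobSpec().
		WithSuccessPolicy(batchv1ac.SuccessPolicy().
			WithRules(batchv1ac.SuccessPolicyRule().
				WithSucceededIndexes("0-2").
				WithSucceededCount(2)))
}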

View File

@@ -3880,6 +3880,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: selector - name: selector
type: type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector namedType: io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector
- name: successPolicy
type:
namedType: io.k8s.api.batch.v1.SuccessPolicy
- name: suspend - name: suspend
type: type:
scalar: boolean scalar: boolean
@@ -3992,6 +3995,24 @@ var schemaYAML = typed.YAMLObject(`types:
elementType: elementType:
namedType: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern namedType: io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern
elementRelationship: atomic elementRelationship: atomic
- name: io.k8s.api.batch.v1.SuccessPolicy
map:
fields:
- name: rules
type:
list:
elementType:
namedType: io.k8s.api.batch.v1.SuccessPolicyRule
elementRelationship: atomic
- name: io.k8s.api.batch.v1.SuccessPolicyRule
map:
fields:
- name: succeededCount
type:
scalar: numeric
- name: succeededIndexes
type:
scalar: string
- name: io.k8s.api.batch.v1.UncountedTerminatedPods - name: io.k8s.api.batch.v1.UncountedTerminatedPods
map: map:
fields: fields:

View File

@@ -559,6 +559,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
return &applyconfigurationsbatchv1.PodFailurePolicyOnPodConditionsPatternApplyConfiguration{} return &applyconfigurationsbatchv1.PodFailurePolicyOnPodConditionsPatternApplyConfiguration{}
case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyRule"): case batchv1.SchemeGroupVersion.WithKind("PodFailurePolicyRule"):
return &applyconfigurationsbatchv1.PodFailurePolicyRuleApplyConfiguration{} return &applyconfigurationsbatchv1.PodFailurePolicyRuleApplyConfiguration{}
case batchv1.SchemeGroupVersion.WithKind("SuccessPolicy"):
return &applyconfigurationsbatchv1.SuccessPolicyApplyConfiguration{}
case batchv1.SchemeGroupVersion.WithKind("SuccessPolicyRule"):
return &applyconfigurationsbatchv1.SuccessPolicyRuleApplyConfiguration{}
case batchv1.SchemeGroupVersion.WithKind("UncountedTerminatedPods"): case batchv1.SchemeGroupVersion.WithKind("UncountedTerminatedPods"):
return &applyconfigurationsbatchv1.UncountedTerminatedPodsApplyConfiguration{} return &applyconfigurationsbatchv1.UncountedTerminatedPodsApplyConfiguration{}

View File

@@ -494,6 +494,374 @@ func TestJobPodFailurePolicy(t *testing.T) {
} }
} }
// TestSuccessPolicy tests handling of a job and its pods when
// successPolicy is used.
func TestSuccessPolicy(t *testing.T) {
type podTerminationWithExpectations struct {
index int
status v1.PodStatus
wantActive int
wantFailed int
wantSucceeded int
wantActiveIndexes sets.Set[int]
wantCompletedIndexes string
wantFailedIndexes *string
}
podTemplateSpec := v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "main-container",
Image: "foo",
ImagePullPolicy: v1.PullIfNotPresent,
TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
},
},
},
}
testCases := map[string]struct {
enableJobSuccessPolicy bool
enableBackoffLimitPerIndex bool
job batchv1.Job
podTerminations []podTerminationWithExpectations
wantConditionTypes []batchv1.JobConditionType
wantJobFinishedNumMetric []metricLabelsWithValue
}{
"all indexes succeeded; JobSuccessPolicy is enabled": {
enableJobSuccessPolicy: true,
job: batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: ptr.To[int32](1),
Completions: ptr.To[int32](1),
CompletionMode: completionModePtr(batchv1.IndexedCompletion),
Template: podTemplateSpec,
SuccessPolicy: &batchv1.SuccessPolicy{
Rules: []batchv1.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0"),
}},
},
},
},
podTerminations: []podTerminationWithExpectations{
{
index: 0,
status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
wantActive: 0,
wantFailed: 0,
wantSucceeded: 1,
wantCompletedIndexes: "0",
},
},
wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
wantJobFinishedNumMetric: []metricLabelsWithValue{
{
Labels: []string{"Indexed", "succeeded", ""},
Value: 1,
},
},
},
"all indexes succeeded; JobSuccessPolicy is disabled": {
job: batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: ptr.To[int32](1),
Completions: ptr.To[int32](1),
CompletionMode: completionModePtr(batchv1.IndexedCompletion),
Template: podTemplateSpec,
SuccessPolicy: &batchv1.SuccessPolicy{
Rules: []batchv1.SuccessPolicyRule{{
SucceededIndexes: ptr.To("0"),
}},
},
},
},
podTerminations: []podTerminationWithExpectations{
{
index: 0,
status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
wantActive: 0,
wantFailed: 0,
wantSucceeded: 1,
wantCompletedIndexes: "0",
},
},
wantConditionTypes: []batchv1.JobConditionType{batchv1.JobComplete},
wantJobFinishedNumMetric: []metricLabelsWithValue{
{
Labels: []string{"Indexed", "succeeded", ""},
Value: 1,
},
},
},
"job with successPolicy with succeededIndexes; job has SuccessCriteriaMet and Complete conditions even if some indexes remain pending": {
enableJobSuccessPolicy: true,
job: batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
CompletionMode: completionModePtr(batchv1.IndexedCompletion),
Template: podTemplateSpec,
SuccessPolicy: &batchv1.SuccessPolicy{
Rules: []batchv1.SuccessPolicyRule{{
SucceededIndexes: ptr.To("1"),
}},
},
},
},
podTerminations: []podTerminationWithExpectations{
{
index: 0,
status: v1.PodStatus{
Phase: v1.PodPending,
},
wantActive: 2,
wantActiveIndexes: sets.New(0, 1),
wantFailed: 0,
wantSucceeded: 0,
},
{
index: 1,
status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
wantActive: 0,
wantFailed: 0,
wantSucceeded: 1,
wantCompletedIndexes: "1",
},
},
wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
wantJobFinishedNumMetric: []metricLabelsWithValue{
{
Labels: []string{"Indexed", "succeeded", ""},
Value: 1,
},
},
},
"job with successPolicy with succeededCount; job has SuccessCriteriaMet and Complete conditions even if some indexes remain pending": {
enableJobSuccessPolicy: true,
job: batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
CompletionMode: completionModePtr(batchv1.IndexedCompletion),
Template: podTemplateSpec,
SuccessPolicy: &batchv1.SuccessPolicy{
Rules: []batchv1.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](1),
}},
},
},
},
podTerminations: []podTerminationWithExpectations{
{
index: 0,
status: v1.PodStatus{
Phase: v1.PodPending,
},
wantActive: 2,
wantActiveIndexes: sets.New(0, 1),
wantFailed: 0,
wantSucceeded: 0,
},
{
index: 1,
status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
wantActive: 0,
wantFailed: 0,
wantSucceeded: 1,
wantCompletedIndexes: "1",
},
},
wantConditionTypes: []batchv1.JobConditionType{batchv1.JobSuccessCriteriaMet, batchv1.JobComplete},
wantJobFinishedNumMetric: []metricLabelsWithValue{
{
Labels: []string{"Indexed", "succeeded", ""},
Value: 1,
},
},
},
"job with successPolicy and backoffLimitPerIndex; job has a Failed condition if job meets backoffLimitPerIndex": {
enableJobSuccessPolicy: true,
enableBackoffLimitPerIndex: true,
job: batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: ptr.To[int32](2),
Completions: ptr.To[int32](2),
CompletionMode: completionModePtr(batchv1.IndexedCompletion),
BackoffLimitPerIndex: ptr.To[int32](0),
Template: podTemplateSpec,
SuccessPolicy: &batchv1.SuccessPolicy{
Rules: []batchv1.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](1),
}},
},
},
},
podTerminations: []podTerminationWithExpectations{
{
index: 0,
status: v1.PodStatus{
Phase: v1.PodFailed,
},
wantActive: 1,
wantActiveIndexes: sets.New(1),
wantFailed: 1,
wantFailedIndexes: ptr.To("0"),
wantSucceeded: 0,
},
{
index: 1,
status: v1.PodStatus{
Phase: v1.PodSucceeded,
},
wantActive: 0,
wantFailed: 1,
wantSucceeded: 1,
wantFailedIndexes: ptr.To("0"),
wantCompletedIndexes: "1",
},
},
wantConditionTypes: []batchv1.JobConditionType{batchv1.JobFailed},
},
}
for name, tc := range testCases {
t.Run(name, func(t *testing.T) {
resetMetrics()
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, tc.enableJobSuccessPolicy)()
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobBackoffLimitPerIndex, tc.enableBackoffLimitPerIndex)()
closeFn, restConfig, clientSet, ns := setup(t, "simple")
defer closeFn()
ctx, cancel := startJobControllerAndWaitForCaches(t, restConfig)
defer func() {
cancel()
}()
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &tc.job)
if err != nil {
t.Fatalf("Error %v while creating the Job %q", err, jobObj.Name)
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: int(*tc.job.Spec.Parallelism),
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
for _, podTermination := range tc.podTerminations {
pod, err := getActivePodForIndex(ctx, clientSet, jobObj, podTermination.index)
if err != nil {
t.Fatalf("Listing Job Pods: %v", err)
}
pod.Status = podTermination.status
if _, err = clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Error updating the Pod %q: %v", klog.KObj(pod), err)
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: podTermination.wantActive,
Succeeded: podTermination.wantSucceeded,
Failed: podTermination.wantFailed,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, podTermination.wantActiveIndexes, podTermination.wantCompletedIndexes, podTermination.wantFailedIndexes)
}
for i := range tc.wantConditionTypes {
validateJobCondition(ctx, t, clientSet, jobObj, tc.wantConditionTypes[i])
}
for i := range tc.wantJobFinishedNumMetric {
validateCounterMetric(ctx, t, metrics.JobFinishedNum, tc.wantJobFinishedNumMetric[i])
}
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
})
}
}
// TestSuccessPolicy_ReEnabling tests handling of pod success when
// re-enabling the JobSuccessPolicy feature.
func TestSuccessPolicy_ReEnabling(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, true)()
closeFn, resetConfig, clientSet, ns := setup(t, "success-policy-re-enabling")
defer closeFn()
ctx, cancel := startJobControllerAndWaitForCaches(t, resetConfig)
defer cancel()
resetMetrics()
jobObj, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{
Parallelism: ptr.To[int32](5),
Completions: ptr.To[int32](5),
CompletionMode: completionModePtr(batchv1.IndexedCompletion),
SuccessPolicy: &batchv1.SuccessPolicy{
Rules: []batchv1.SuccessPolicyRule{{
SucceededCount: ptr.To[int32](3),
}},
},
},
})
if err != nil {
t.Fatalf("Failed to create Job: %v", err)
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 5,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(0, 1, 2, 3, 4), "", nil)
// First pod from index 0 succeeded
if err = setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodSucceeded, 0); err != nil {
t.Fatalf("Failed tring to succeess pod with index 0")
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 4,
Succeeded: 1,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(1, 2, 3, 4), "0", nil)
// Disable the JobSuccessPolicy
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, false)()
// First pod from index 1 succeeded
if err = setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodSucceeded, 1); err != nil {
t.Fatalf("Failed trying to succeess pod with index 1")
}
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 3,
Succeeded: 2,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New(2, 3, 4), "0,1", nil)
// Re-enable the JobSuccessPolicy
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.JobSuccessPolicy, true)()
// First pod from index 2 succeeded
if err = setJobPhaseForIndex(ctx, clientSet, jobObj, v1.PodSucceeded, 2); err != nil {
t.Fatalf("Failed trying to success pod with index 2")
}
// Verify all indexes are terminated as the job meets the successPolicy.
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Active: 0,
Succeeded: 3,
Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0),
})
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New[int](), "0-2", nil)
validateJobCondition(ctx, t, clientSet, jobObj, batchv1.JobSuccessCriteriaMet)
validateJobComplete(ctx, t, clientSet, jobObj)
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
}
// TestBackoffLimitPerIndex_DelayedPodDeletion tests the pod deletion is delayed // TestBackoffLimitPerIndex_DelayedPodDeletion tests the pod deletion is delayed
// until the replacement pod is created, so that the replacement pod has the // until the replacement pod is created, so that the replacement pod has the
// index-failure-count annotation bumped, when BackoffLimitPerIndex is used. // index-failure-count annotation bumped, when BackoffLimitPerIndex is used.
@@ -570,7 +938,7 @@ func TestBackoffLimitPerIndex_DelayedPodDeletion(t *testing.T) {
Ready: ptr.To[int32](0), Ready: ptr.To[int32](0),
Terminating: ptr.To[int32](0), Terminating: ptr.To[int32](0),
}) })
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
} }
// TestBackoffLimitPerIndex_Reenabling tests handling of pod failures when // TestBackoffLimitPerIndex_Reenabling tests handling of pod failures when
@@ -766,7 +1134,7 @@ func TestBackoffLimitPerIndex_JobPodsCreatedWithExponentialBackoff(t *testing.T)
Terminating: ptr.To[int32](0), Terminating: ptr.To[int32](0),
}) })
validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New[int](), "0,1", ptr.To("")) validateIndexedJobPods(ctx, t, clientSet, jobObj, sets.New[int](), "0,1", ptr.To(""))
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
for index := 0; index < int(*jobObj.Spec.Completions); index++ { for index := 0; index < int(*jobObj.Spec.Completions); index++ {
podsForIndex, err := getJobPodsForIndex(ctx, clientSet, jobObj, index, func(_ *v1.Pod) bool { return true }) podsForIndex, err := getJobPodsForIndex(ctx, clientSet, jobObj, index, func(_ *v1.Pod) bool { return true })
@@ -1568,7 +1936,7 @@ func TestNonParallelJob(t *testing.T) {
if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 1); err != nil { if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 1); err != nil {
t.Fatalf("Failed setting phase %s on Job Pod: %v", v1.PodSucceeded, err) t.Fatalf("Failed setting phase %s on Job Pod: %v", v1.PodSucceeded, err)
} }
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Failed: 1, Failed: 1,
Succeeded: 1, Succeeded: 1,
@@ -1654,7 +2022,7 @@ func TestParallelJob(t *testing.T) {
if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 2); err != nil { if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 2); err != nil {
t.Fatalf("Failed setting phase %s on Job Pods: %v", v1.PodSucceeded, err) t.Fatalf("Failed setting phase %s on Job Pods: %v", v1.PodSucceeded, err)
} }
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
want = podsByStatus{ want = podsByStatus{
Failed: 4, Failed: 4,
Succeeded: 3, Succeeded: 3,
@@ -1727,7 +2095,7 @@ func TestParallelJobChangingParallelism(t *testing.T) {
if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 4); err != nil { if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 4); err != nil {
t.Fatalf("Failed setting phase %s on Job Pods: %v", v1.PodFailed, err) t.Fatalf("Failed setting phase %s on Job Pods: %v", v1.PodFailed, err)
} }
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{ validateJobPodsStatus(ctx, t, clientSet, jobObj, podsByStatus{
Succeeded: 4, Succeeded: 4,
Ready: ptr.To[int32](0), Ready: ptr.To[int32](0),
@@ -1797,7 +2165,7 @@ func TestParallelJobWithCompletions(t *testing.T) {
if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 3); err != nil { if err, _ := setJobPodsPhase(ctx, clientSet, jobObj, v1.PodSucceeded, 3); err != nil {
t.Fatalf("Failed setting phase %s on Job Pods: %v", v1.PodSucceeded, err) t.Fatalf("Failed setting phase %s on Job Pods: %v", v1.PodSucceeded, err)
} }
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
want = podsByStatus{ want = podsByStatus{
Failed: 2, Failed: 2,
Succeeded: 56, Succeeded: 56,
@@ -1895,7 +2263,7 @@ func TestIndexedJob(t *testing.T) {
Terminating: ptr.To[int32](0), Terminating: ptr.To[int32](0),
}) })
validateIndexedJobPods(ctx, t, clientSet, jobObj, nil, "0-3", nil) validateIndexedJobPods(ctx, t, clientSet, jobObj, nil, "0-3", nil)
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj) validateFinishedPodsNoFinalizer(ctx, t, clientSet, jobObj)
validateTerminatedPodsTrackingFinalizerMetric(ctx, t, 5) validateTerminatedPodsTrackingFinalizerMetric(ctx, t, 5)
validateCounterMetric(ctx, t, metrics.JobFinishedIndexesTotal, metricLabelsWithValue{ validateCounterMetric(ctx, t, metrics.JobFinishedIndexesTotal, metricLabelsWithValue{
@@ -2408,7 +2776,7 @@ func TestElasticIndexedJob(t *testing.T) {
validateIndexedJobPods(ctx, t, clientSet, jobObj, update.wantRemainingIndexes, update.wantSucceededIndexes, nil) validateIndexedJobPods(ctx, t, clientSet, jobObj, update.wantRemainingIndexes, update.wantSucceededIndexes, nil)
} }
validateJobSucceeded(ctx, t, clientSet, jobObj) validateJobComplete(ctx, t, clientSet, jobObj)
}) })
} }
} }
@@ -2486,7 +2854,7 @@ func BenchmarkLargeIndexedJob(b *testing.B) {
}); err != nil { }); err != nil {
b.Fatalf("Could not succeed the remaining %d pods: %v", remaining, err) b.Fatalf("Could not succeed the remaining %d pods: %v", remaining, err)
} }
validateJobSucceeded(ctx, b, clientSet, jobObj) validateJobComplete(ctx, b, clientSet, jobObj)
b.StopTimer() b.StopTimer()
} }
}) })
@@ -3104,7 +3472,7 @@ func validateJobPodsStatus(ctx context.Context, t testing.TB, clientSet clientse
t.Helper() t.Helper()
validateJobsPodsStatusOnly(ctx, t, clientSet, jobObj, desired) validateJobsPodsStatusOnly(ctx, t, clientSet, jobObj, desired)
var active []*v1.Pod var active []*v1.Pod
if err := wait.PollUntilContextTimeout(ctx, waitInterval, wait.ForeverTestTimeout, true, func(ctx context.Context) (bool, error) { if err := wait.PollUntilContextTimeout(ctx, waitInterval, time.Second*5, true, func(ctx context.Context) (bool, error) {
pods, err := clientSet.CoreV1().Pods(jobObj.Namespace).List(ctx, metav1.ListOptions{}) pods, err := clientSet.CoreV1().Pods(jobObj.Namespace).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to list Job Pods: %v", err) t.Fatalf("Failed to list Job Pods: %v", err)
@@ -3242,7 +3610,7 @@ func validateJobFailed(ctx context.Context, t *testing.T, clientSet clientset.In
validateJobCondition(ctx, t, clientSet, jobObj, batchv1.JobFailed) validateJobCondition(ctx, t, clientSet, jobObj, batchv1.JobFailed)
} }
func validateJobSucceeded(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job) { func validateJobComplete(ctx context.Context, t testing.TB, clientSet clientset.Interface, jobObj *batchv1.Job) {
t.Helper() t.Helper()
validateJobCondition(ctx, t, clientSet, jobObj, batchv1.JobComplete) validateJobCondition(ctx, t, clientSet, jobObj, batchv1.JobComplete)
} }