Move out const strings in pkg/scheduler/api/well_known_labels.go
@@ -49,7 +49,6 @@ import (
 	"k8s.io/kubernetes/pkg/apis/scheduling"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/features"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 )
@@ -75,13 +74,13 @@ func nowPointer() *metav1.Time {
 
 var (
 	nodeNotReady = []v1.Taint{{
-		Key:       schedulerapi.TaintNodeNotReady,
+		Key:       v1.TaintNodeNotReady,
 		Effect:    v1.TaintEffectNoExecute,
 		TimeAdded: nowPointer(),
 	}}
 
 	nodeUnreachable = []v1.Taint{{
-		Key:       schedulerapi.TaintNodeUnreachable,
+		Key:       v1.TaintNodeUnreachable,
 		Effect:    v1.TaintEffectNoExecute,
 		TimeAdded: nowPointer(),
 	}}
@@ -528,7 +527,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
 	}
 
 	field := nodeSelector.NodeSelectorTerms[0].MatchFields[0]
-	if field.Key == schedulerapi.NodeFieldSelectorKeyNodeName {
+	if field.Key == api.ObjectNameField {
 		if field.Operator != v1.NodeSelectorOpIn {
 			t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
 		}
@@ -1517,9 +1516,9 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
 		{Type: v1.NodePIDPressure, Status: v1.ConditionTrue},
 	}
 	node.Spec.Taints = []v1.Taint{
-		{Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
-		{Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
-		{Key: schedulerapi.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule},
+		{Key: v1.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
+		{Key: v1.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
+		{Key: v1.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule},
 	}
 	manager.nodeStore.Add(node)
 
@@ -2161,7 +2160,7 @@ func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"node-2"},
					},

@@ -25,8 +25,8 @@ import (
 	extensions "k8s.io/api/extensions/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	api "k8s.io/kubernetes/pkg/apis/core"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 )
 
 // GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
@@ -52,7 +52,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) {
 	// to survive taint-based eviction enforced by NodeController
 	// when node turns not ready.
 	v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-		Key:      schedulerapi.TaintNodeNotReady,
+		Key:      v1.TaintNodeNotReady,
 		Operator: v1.TolerationOpExists,
 		Effect:   v1.TaintEffectNoExecute,
 	})
@@ -62,7 +62,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) {
 	// to survive taint-based eviction enforced by NodeController
 	// when node turns unreachable.
 	v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-		Key:      schedulerapi.TaintNodeUnreachable,
+		Key:      v1.TaintNodeUnreachable,
 		Operator: v1.TolerationOpExists,
 		Effect:   v1.TaintEffectNoExecute,
 	})
@@ -70,32 +70,32 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec) {
 	// According to TaintNodesByCondition feature, all DaemonSet pods should tolerate
 	// MemoryPressure, DiskPressure, PIDPressure, Unschedulable and NetworkUnavailable taints.
 	v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-		Key:      schedulerapi.TaintNodeDiskPressure,
+		Key:      v1.TaintNodeDiskPressure,
 		Operator: v1.TolerationOpExists,
 		Effect:   v1.TaintEffectNoSchedule,
 	})
 
 	v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-		Key:      schedulerapi.TaintNodeMemoryPressure,
+		Key:      v1.TaintNodeMemoryPressure,
 		Operator: v1.TolerationOpExists,
 		Effect:   v1.TaintEffectNoSchedule,
 	})
 
 	v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-		Key:      schedulerapi.TaintNodePIDPressure,
+		Key:      v1.TaintNodePIDPressure,
 		Operator: v1.TolerationOpExists,
 		Effect:   v1.TaintEffectNoSchedule,
 	})
 
 	v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-		Key:      schedulerapi.TaintNodeUnschedulable,
+		Key:      v1.TaintNodeUnschedulable,
 		Operator: v1.TolerationOpExists,
 		Effect:   v1.TaintEffectNoSchedule,
 	})
 
 	if spec.HostNetwork {
 		v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-			Key:      schedulerapi.TaintNodeNetworkUnavailable,
+			Key:      v1.TaintNodeNetworkUnavailable,
 			Operator: v1.TolerationOpExists,
 			Effect:   v1.TaintEffectNoSchedule,
 		})
@@ -151,7 +151,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
 // Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
 func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
 	nodeSelReq := v1.NodeSelectorRequirement{
-		Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+		Key:      api.ObjectNameField,
 		Operator: v1.NodeSelectorOpIn,
 		Values:   []string{nodename},
 	}
@@ -220,11 +220,11 @@ func GetTargetNodeName(pod *v1.Pod) (string, error) {
 
 	for _, term := range terms {
 		for _, exp := range term.MatchFields {
-			if exp.Key == schedulerapi.NodeFieldSelectorKeyNodeName &&
+			if exp.Key == api.ObjectNameField &&
				exp.Operator == v1.NodeSelectorOpIn {
				if len(exp.Values) != 1 {
					return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
-						schedulerapi.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
+						api.ObjectNameField, pod.Namespace, pod.Name)
				}
 
				return exp.Values[0], nil

@@ -27,7 +27,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+	api "k8s.io/kubernetes/pkg/apis/core"
 	utilpointer "k8s.io/utils/pointer"
 )
 
@@ -185,7 +185,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -222,7 +222,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -272,7 +272,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -291,7 +291,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1", "host_2"},
					},
@@ -309,7 +309,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -330,7 +330,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -358,7 +358,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_2"},
					},
@@ -376,7 +376,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -395,7 +395,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpNotIn,
						Values:   []string{"host_2"},
					},
@@ -413,7 +413,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -453,7 +453,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"host_1"},
					},
@@ -519,7 +519,7 @@ func TestGetTargetNodeName(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"node-1"},
					},
@@ -547,7 +547,7 @@ func TestGetTargetNodeName(t *testing.T) {
			{
				MatchFields: []v1.NodeSelectorRequirement{
					{
-						Key:      schedulerapi.NodeFieldSelectorKeyNodeName,
+						Key:      api.ObjectNameField,
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"node-1", "node-2"},
					},

@@ -42,7 +42,6 @@ import (
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	cloudprovider "k8s.io/cloud-provider"
 	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 	utiltaints "k8s.io/kubernetes/pkg/util/taints"
 	"k8s.io/legacy-cloud-providers/gce"
@@ -117,7 +116,7 @@ func NewCloudCIDRAllocator(client clientset.Interface, cloud cloudprovider.Inter
			}
			// Even if PodCIDR is assigned, but NetworkUnavailable condition is
			// set to true, we need to process the node to set the condition.
-			networkUnavailableTaint := &v1.Taint{Key: schedulerapi.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
+			networkUnavailableTaint := &v1.Taint{Key: v1.TaintNodeNetworkUnavailable, Effect: v1.TaintEffectNoSchedule}
			_, cond := nodeutil.GetNodeCondition(&newNode.Status, v1.NodeNetworkUnavailable)
			if cond == nil || cond.Status != v1.ConditionFalse || utiltaints.TaintExists(newNode.Spec.Taints, networkUnavailableTaint) {
				return ca.AllocateOrOccupyCIDR(newNode)

@@ -60,7 +60,6 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	utilnode "k8s.io/kubernetes/pkg/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 )
@@ -73,14 +72,14 @@ func init() {
 var (
 	// UnreachableTaintTemplate is the taint for when a node becomes unreachable.
 	UnreachableTaintTemplate = &v1.Taint{
-		Key:    schedulerapi.TaintNodeUnreachable,
+		Key:    v1.TaintNodeUnreachable,
 		Effect: v1.TaintEffectNoExecute,
 	}
 
 	// NotReadyTaintTemplate is the taint for when a node is not ready for
 	// executing pods
 	NotReadyTaintTemplate = &v1.Taint{
-		Key:    schedulerapi.TaintNodeNotReady,
+		Key:    v1.TaintNodeNotReady,
 		Effect: v1.TaintEffectNoExecute,
 	}
 
@@ -90,30 +89,30 @@ var (
 	// for certain NodeConditionType, there are multiple {ConditionStatus,TaintKey} pairs
 	nodeConditionToTaintKeyStatusMap = map[v1.NodeConditionType]map[v1.ConditionStatus]string{
		v1.NodeReady: {
-			v1.ConditionFalse:   schedulerapi.TaintNodeNotReady,
-			v1.ConditionUnknown: schedulerapi.TaintNodeUnreachable,
+			v1.ConditionFalse:   v1.TaintNodeNotReady,
+			v1.ConditionUnknown: v1.TaintNodeUnreachable,
		},
		v1.NodeMemoryPressure: {
-			v1.ConditionTrue: schedulerapi.TaintNodeMemoryPressure,
+			v1.ConditionTrue: v1.TaintNodeMemoryPressure,
		},
		v1.NodeDiskPressure: {
-			v1.ConditionTrue: schedulerapi.TaintNodeDiskPressure,
+			v1.ConditionTrue: v1.TaintNodeDiskPressure,
		},
		v1.NodeNetworkUnavailable: {
-			v1.ConditionTrue: schedulerapi.TaintNodeNetworkUnavailable,
+			v1.ConditionTrue: v1.TaintNodeNetworkUnavailable,
		},
		v1.NodePIDPressure: {
-			v1.ConditionTrue: schedulerapi.TaintNodePIDPressure,
+			v1.ConditionTrue: v1.TaintNodePIDPressure,
		},
 	}
 
 	taintKeyToNodeConditionMap = map[string]v1.NodeConditionType{
-		schedulerapi.TaintNodeNotReady:           v1.NodeReady,
-		schedulerapi.TaintNodeUnreachable:        v1.NodeReady,
-		schedulerapi.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
-		schedulerapi.TaintNodeMemoryPressure:     v1.NodeMemoryPressure,
-		schedulerapi.TaintNodeDiskPressure:       v1.NodeDiskPressure,
-		schedulerapi.TaintNodePIDPressure:        v1.NodePIDPressure,
+		v1.TaintNodeNotReady:           v1.NodeReady,
+		v1.TaintNodeUnreachable:        v1.NodeReady,
+		v1.TaintNodeNetworkUnavailable: v1.NodeNetworkUnavailable,
+		v1.TaintNodeMemoryPressure:     v1.NodeMemoryPressure,
+		v1.TaintNodeDiskPressure:       v1.NodeDiskPressure,
+		v1.TaintNodePIDPressure:        v1.NodePIDPressure,
 	}
 )
 
@@ -584,7 +583,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
 	if node.Spec.Unschedulable {
		// If unschedulable, append related taint.
		taints = append(taints, v1.Taint{
-			Key:    schedulerapi.TaintNodeUnschedulable,
+			Key:    v1.TaintNodeUnschedulable,
			Effect: v1.TaintEffectNoSchedule,
		})
 	}
@@ -596,7 +595,7 @@ func (nc *Controller) doNoScheduleTaintingPass(nodeName string) error {
			return false
		}
		// Find unschedulable taint of node.
-		if t.Key == schedulerapi.TaintNodeUnschedulable {
+		if t.Key == v1.TaintNodeUnschedulable {
			return true
		}
		// Find node condition taints of node.

@@ -47,7 +47,6 @@ import (
 	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
 	"k8s.io/kubernetes/pkg/features"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
 	"k8s.io/kubernetes/pkg/util/node"
 	taintutils "k8s.io/kubernetes/pkg/util/taints"
 	"k8s.io/utils/pointer"
@@ -2879,15 +2878,15 @@ func TestTaintsNodeByCondition(t *testing.T) {
 	nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(fakeNodeHandler.Clientset)
 
 	networkUnavailableTaint := &v1.Taint{
-		Key:    schedulerapi.TaintNodeNetworkUnavailable,
+		Key:    v1.TaintNodeNetworkUnavailable,
 		Effect: v1.TaintEffectNoSchedule,
 	}
 	notReadyTaint := &v1.Taint{
-		Key:    schedulerapi.TaintNodeNotReady,
+		Key:    v1.TaintNodeNotReady,
 		Effect: v1.TaintEffectNoSchedule,
 	}
 	unreachableTaint := &v1.Taint{
-		Key:    schedulerapi.TaintNodeUnreachable,
+		Key:    v1.TaintNodeUnreachable,
 		Effect: v1.TaintEffectNoSchedule,
 	}
 
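For reference, a minimal standalone sketch (illustrative only, not part of this commit) of how a caller builds the same not-ready toleration after the move: the taint key constants now come from k8s.io/api/core/v1 rather than k8s.io/kubernetes/pkg/scheduler/api.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Mirror the toleration that AddOrUpdateDaemonPodTolerations adds in the diff above,
	// using the relocated constant (was schedulerapi.TaintNodeNotReady).
	tol := v1.Toleration{
		Key:      v1.TaintNodeNotReady,
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
	}
	fmt.Printf("tolerate taint %q with effect %s\n", tol.Key, tol.Effect)
}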
|
Reference in New Issue
Block a user