Move pkg/scheduler/algorithm/well_known_labels.go out

Author: tanshanshan
Date: 2018-09-28 10:37:38 +08:00
parent e4200cea9c
commit b7c7966b9f
38 changed files with 194 additions and 190 deletions
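In short, every consumer of the well-known taint and node-label constants switches its import from k8s.io/kubernetes/pkg/scheduler/algorithm to k8s.io/kubernetes/pkg/scheduler/api (aliased schedulerapi throughout this commit); the constant names themselves do not change. Below is a minimal sketch of the new call pattern, mirroring the toleration changes in the diff; the standalone main wrapper is illustrative only.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" // was: "k8s.io/kubernetes/pkg/scheduler/algorithm"
)

func main() {
	// Only the exporting package moved; the taint key constant is the same.
	notReady := v1.Toleration{
		Key:      schedulerapi.TaintNodeNotReady, // previously algorithm.TaintNodeNotReady
		Operator: v1.TolerationOpExists,
		Effect:   v1.TaintEffectNoExecute,
	}
	fmt.Println(notReady.Key)
}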

View File

@@ -71,7 +71,7 @@ go_test(
"//pkg/controller:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/labels:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",

View File

@@ -48,7 +48,7 @@ import (
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/features"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
"k8s.io/kubernetes/pkg/securitycontext"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
)
@@ -78,13 +78,13 @@ func nowPointer() *metav1.Time {
var (
nodeNotReady = []v1.Taint{{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
Effect: v1.TaintEffectNoExecute,
TimeAdded: nowPointer(),
}}
nodeUnreachable = []v1.Taint{{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
Effect: v1.TaintEffectNoExecute,
TimeAdded: nowPointer(),
}}
@@ -529,7 +529,7 @@ func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
}
field := nodeSelector.NodeSelectorTerms[0].MatchFields[0]
-if field.Key == algorithm.NodeFieldSelectorKeyNodeName {
+if field.Key == schedulerapi.NodeFieldSelectorKeyNodeName {
if field.Operator != v1.NodeSelectorOpIn {
t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
}
@@ -1759,7 +1759,7 @@ func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
node := newNode("not-enough-disk", nil)
node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
-node.Spec.Taints = []v1.Taint{{Key: algorithm.TaintNodeOutOfDisk, Effect: v1.TaintEffectNoSchedule}}
+node.Spec.Taints = []v1.Taint{{Key: schedulerapi.TaintNodeOutOfDisk, Effect: v1.TaintEffectNoSchedule}}
manager.nodeStore.Add(node)
// NOTE: Whether or not TaintNodesByCondition is enabled, it will add tolerations to DaemonSet pods.
@@ -1801,8 +1801,8 @@ func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue},
}
node.Spec.Taints = []v1.Taint{
-{Key: algorithm.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
-{Key: algorithm.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
+{Key: schedulerapi.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
+{Key: schedulerapi.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
}
manager.nodeStore.Add(node)

View File

@@ -15,7 +15,7 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/types:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
@@ -45,7 +45,7 @@ go_test(
"//pkg/api/testapi:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
-"//pkg/scheduler/algorithm:go_default_library",
+"//pkg/scheduler/api:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -29,7 +29,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
// GetTemplateGeneration gets the template generation associated with a v1.DaemonSet by extracting it from the
@@ -55,7 +55,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
// to survive taint-based eviction enforced by NodeController
// when the node becomes not ready.
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeNotReady,
+Key: schedulerapi.TaintNodeNotReady,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
@@ -65,7 +65,7 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
// to survive taint-based eviction enforced by NodeController
// when the node becomes unreachable.
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeUnreachable,
+Key: schedulerapi.TaintNodeUnreachable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
@@ -74,26 +74,26 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
// MemoryPressure, DiskPressure, Unschedulable and NetworkUnavailable taints,
// and critical pods should tolerate the OutOfDisk taint.
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeDiskPressure,
+Key: schedulerapi.TaintNodeDiskPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeMemoryPressure,
+Key: schedulerapi.TaintNodeMemoryPressure,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeUnschedulable,
+Key: schedulerapi.TaintNodeUnschedulable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
if spec.HostNetwork {
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeNetworkUnavailable,
+Key: schedulerapi.TaintNodeNetworkUnavailable,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
@@ -102,12 +102,12 @@ func AddOrUpdateDaemonPodTolerations(spec *v1.PodSpec, isCritical bool) {
// TODO(#48843) OutOfDisk taints will be removed in 1.10
if isCritical {
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeOutOfDisk,
+Key: schedulerapi.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoExecute,
})
v1helper.AddOrUpdateTolerationInPodSpec(spec, &v1.Toleration{
-Key: algorithm.TaintNodeOutOfDisk,
+Key: schedulerapi.TaintNodeOutOfDisk,
Operator: v1.TolerationOpExists,
Effect: v1.TaintEffectNoSchedule,
})
@@ -167,7 +167,7 @@ func SplitByAvailablePods(minReadySeconds int32, pods []*v1.Pod) ([]*v1.Pod, []*
// Note that this function assumes that no NodeAffinity conflicts with the selected nodeName.
func ReplaceDaemonSetPodNodeNameNodeAffinity(affinity *v1.Affinity, nodename string) *v1.Affinity {
nodeSelReq := v1.NodeSelectorRequirement{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodename},
}
@@ -236,11 +236,11 @@ func GetTargetNodeName(pod *v1.Pod) (string, error) {
for _, term := range terms {
for _, exp := range term.MatchFields {
-if exp.Key == algorithm.NodeFieldSelectorKeyNodeName &&
+if exp.Key == schedulerapi.NodeFieldSelectorKeyNodeName &&
exp.Operator == v1.NodeSelectorOpIn {
if len(exp.Values) != 1 {
return "", fmt.Errorf("the matchFields value of '%s' is not unique for pod %s/%s",
-algorithm.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
+schedulerapi.NodeFieldSelectorKeyNodeName, pod.Namespace, pod.Name)
}
return exp.Values[0], nil

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/features"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
-"k8s.io/kubernetes/pkg/scheduler/algorithm"
+schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
)
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
@@ -190,7 +190,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -227,7 +227,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -277,7 +277,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -296,7 +296,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1", "host_2"},
},
@@ -314,7 +314,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -335,7 +335,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -363,7 +363,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_2"},
},
@@ -381,7 +381,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -400,7 +400,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpNotIn,
Values: []string{"host_2"},
},
@@ -418,7 +418,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -458,7 +458,7 @@ func TestReplaceDaemonSetPodNodeNameNodeAffinity(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"host_1"},
},
@@ -529,7 +529,7 @@ func TestGetTargetNodeName(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"node-1"},
},
@@ -557,7 +557,7 @@ func TestGetTargetNodeName(t *testing.T) {
{
MatchFields: []v1.NodeSelectorRequirement{
{
-Key: algorithm.NodeFieldSelectorKeyNodeName,
+Key: schedulerapi.NodeFieldSelectorKeyNodeName,
Operator: v1.NodeSelectorOpIn,
Values: []string{"node-1", "node-2"},
},