Merge pull request #88059 from msau42/refactor-e2e-node-selection
Refactor e2e node selection
@@ -86,10 +86,7 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
 // CreateSecPodWithNodeSelection creates security pod with given claims
 func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
 	pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
-	// Setting node
-	pod.Spec.NodeName = node.Name
-	pod.Spec.NodeSelector = node.Selector
-	pod.Spec.Affinity = node.Affinity
+	SetNodeSelection(pod, node)
 
 	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 	if err != nil {
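The direct field assignments are replaced by a single call to the new SetNodeSelection helper (added in the next hunk), so whatever NodeSelection a caller passes in is applied in one place. A minimal sketch of a caller, assuming the e2epod package path and a hypothetical zone label and test wiring:

```go
package storagetest // hypothetical test package

import (
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createZonePinnedPod is a sketch, not part of this change: it constrains the
// security pod to nodes carrying a zone label by filling in NodeSelection and
// letting the helper apply it via SetNodeSelection.
func createZonePinnedPod(client clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	node := e2epod.NodeSelection{
		Selector: map[string]string{"topology.kubernetes.io/zone": "us-central1-a"},
	}
	return e2epod.CreateSecPodWithNodeSelection(client, ns,
		[]*v1.PersistentVolumeClaim{pvc}, nil /* inlineVolumeSources */,
		false /* isPrivileged */, "sleep 3600", false /* hostIPC */, false /* hostPID */,
		nil /* seLinuxLabel */, nil /* fsGroup */, node, 5*time.Minute)
}
```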
@@ -87,3 +87,19 @@ func SetNodeAffinity(pod *v1.Pod, nodeName string) {
 	SetAffinity(nodeSelection, nodeName)
 	pod.Spec.Affinity = nodeSelection.Affinity
 }
+
+// SetNodeSelection modifies the given pod object with
+// the specified NodeSelection
+func SetNodeSelection(pod *v1.Pod, nodeSelection NodeSelection) {
+	pod.Spec.NodeSelector = nodeSelection.Selector
+	pod.Spec.Affinity = nodeSelection.Affinity
+	// pod.Spec.NodeName should not be set directly because
+	// it will bypass the scheduler, potentially causing
+	// kubelet to Fail the pod immediately if it's out of
+	// resources. Instead, we want the pod to remain
+	// pending in the scheduler until the node has resources
+	// freed up.
+	if nodeSelection.Name != "" {
+		SetNodeAffinity(pod, nodeSelection.Name)
+	}
+}
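The comment block carries the rationale for the refactor: expressing a node constraint through affinity keeps an unschedulable pod pending in the scheduler instead of letting kubelet reject it outright. The internals of SetAffinity/SetNodeAffinity are not shown in this diff, but pinning a pod to one node by name via affinity looks roughly like the following sketch (assuming a required term on the metadata.name field):

```go
package storagetest // hypothetical

import v1 "k8s.io/api/core/v1"

// nodeAffinityFor builds a required node-affinity term that selects a single
// node by name. Unlike setting pod.Spec.NodeName, this still goes through the
// scheduler, so the pod stays Pending if the node has no free resources.
func nodeAffinityFor(nodeName string) *v1.Affinity {
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchFields: []v1.NodeSelectorRequirement{{
						Key:      "metadata.name",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{nodeName},
					}},
				}},
			},
		},
	}
}
```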
@@ -128,12 +128,8 @@ type TestConfig struct {
 	// Wait for the pod to terminate successfully
 	// False indicates that the pod is long running
 	WaitForCompletion bool
-	// ServerNodeName is the spec.nodeName to run server pod on. Default is any node.
-	ServerNodeName string
-	// ClientNodeName is the spec.nodeName to run client pod on. Default is any node.
-	ClientNodeName string
-	// NodeSelector to use in pod spec (server, client and injector pods).
-	NodeSelector map[string]string
+	// ClientNodeSelection restricts where the client pod runs on. Default is any node.
+	ClientNodeSelection e2epod.NodeSelection
 }
 
 // Test contains a volume to mount into a client pod and its
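Tests that previously set ClientNodeName or NodeSelector on TestConfig now express the same constraint through ClientNodeSelection. A short sketch, assuming TestConfig lives in the e2e framework's volume package (the helper name is hypothetical):

```go
package storagetest // hypothetical

import (
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// pinClientPod replaces the removed ClientNodeName/NodeSelector fields:
// the client pod's placement is now described by a single NodeSelection.
func pinClientPod(config *e2evolume.TestConfig, nodeName string) {
	// Pin by name; a label constraint could be set via the Selector field instead.
	config.ClientNodeSelection = e2epod.NodeSelection{Name: nodeName}
}
```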
@@ -297,8 +293,6 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
 			},
 			Volumes: volumes,
 			RestartPolicy: restartPolicy,
-			NodeName: config.ServerNodeName,
-			NodeSelector: config.NodeSelector,
 		},
 	}
 
@@ -389,10 +383,9 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
 			TerminationGracePeriodSeconds: &gracePeriod,
 			SecurityContext: GeneratePodSecurityContext(fsGroup, seLinuxOptions),
 			Volumes: []v1.Volume{},
-			NodeName: config.ClientNodeName,
-			NodeSelector: config.NodeSelector,
 		},
 	}
+	e2epod.SetNodeSelection(clientPod, config.ClientNodeSelection)
 
 	for i, test := range tests {
 		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
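SetNodeSelection only mutates the pod object, so it has to run after the spec is assembled and before the Create call, as it does here. The same ordering applies anywhere a test builds its own pod; a sketch, with the pod value and node name as placeholders:

```go
package storagetest // hypothetical

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// createOnNode applies node selection to an already-built pod, then creates it,
// mirroring the order used in runVolumeTesterPod above.
func createOnNode(client clientset.Interface, ns string, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
	e2epod.SetNodeSelection(pod, e2epod.NodeSelection{Name: nodeName})
	return client.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}
```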