Merge pull request #119963 from pohly/dra-scheduler-perf-multiple-claims

dra: scheduler_perf test case with multiple claims per pod
Kubernetes Prow Robot authored on 2023-08-29 00:25:34 -07:00, committed by GitHub.
6 changed files with 116 additions and 6 deletions

@@ -0,0 +1,7 @@
apiVersion: resource.k8s.io/v1alpha1
kind: ResourceClaimTemplate
metadata:
  name: another-test-claim-template
spec:
  spec:
    resourceClassName: another-test-class

@@ -0,0 +1,5 @@
apiVersion: resource.k8s.io/v1alpha1
kind: ResourceClass
metadata:
  name: another-test-class
driverName: another-test-driver.cdi.k8s.io

@@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: test-dra
spec:
  containers:
  - image: registry.k8s.io/pause:3.9
    name: pause
    resources:
      claims:
      - name: resource-1a
      - name: resource-1b
      - name: resource-2a
      - name: resource-2b
  resourceClaims:
  - name: resource-1a
    source:
      resourceClaimTemplateName: test-claim-template
  - name: resource-1b
    source:
      resourceClaimTemplateName: test-claim-template
  - name: resource-2a
    source:
      resourceClaimTemplateName: another-test-claim-template
  - name: resource-2b
    source:
      resourceClaimTemplateName: another-test-claim-template

@@ -1,5 +0,0 @@
apiVersion: resource.k8s.io/v1alpha1
kind: ResourceClass
metadata:
  name: scheduler-performance
driverName: test-driver.cdi.k8s.io

@@ -777,3 +777,79 @@
      initPods: 1000
      measurePods: 1000
      maxClaimsPerNode: 10
# This is similar to SchedulingWithResourceClaimTemplate, except
# that it uses four claims per pod, from two different drivers.
# This emphasizes a bit more the complexity of collaborative
# scheduling via PodSchedulingContext.
- name: SchedulingWithMultipleResourceClaims
  featureGates:
    DynamicResourceAllocation: true
  workloadTemplate:
  - opcode: createNodes
    countParam: $nodesWithoutDRA
  - opcode: createNodes
    nodeTemplatePath: config/dra/node-with-dra-test-driver.yaml
    countParam: $nodesWithDRA
  - opcode: createResourceDriver
    driverName: test-driver.cdi.k8s.io
    nodes: scheduler-perf-dra-*
    maxClaimsPerNodeParam: $maxClaimsPerNode
  - opcode: createResourceDriver
    driverName: another-test-driver.cdi.k8s.io
    nodes: scheduler-perf-dra-*
    maxClaimsPerNodeParam: $maxClaimsPerNode
  - opcode: createResourceClass
    templatePath: config/dra/resourceclass.yaml
  - opcode: createResourceClass
    templatePath: config/dra/another-resourceclass.yaml
  - opcode: createResourceClaimTemplate
    templatePath: config/dra/resourceclaimtemplate.yaml
    namespace: init
  - opcode: createResourceClaimTemplate
    templatePath: config/dra/another-resourceclaimtemplate.yaml
    namespace: init
  - opcode: createPods
    namespace: init
    countParam: $initPods
    podTemplatePath: config/dra/pod-with-many-claim-templates.yaml
  - opcode: createResourceClaimTemplate
    templatePath: config/dra/resourceclaimtemplate.yaml
    namespace: test
  - opcode: createResourceClaimTemplate
    templatePath: config/dra/another-resourceclaimtemplate.yaml
    namespace: test
  - opcode: createPods
    namespace: test
    countParam: $measurePods
    podTemplatePath: config/dra/pod-with-many-claim-templates.yaml
    collectMetrics: true
  workloads:
  - name: fast
    params:
      # This testcase runs through all code paths without
      # taking too long overall.
      nodesWithDRA: 1
      nodesWithoutDRA: 1
      initPods: 0
      measurePods: 1
      maxClaimsPerNode: 20
  - name: 2000pods_100nodes
    params:
      # In this testcase, the number of nodes is smaller
      # than the limit for the PodScheduling slices.
      nodesWithDRA: 100
      nodesWithoutDRA: 0
      initPods: 1000
      measurePods: 1000
      maxClaimsPerNode: 40
  - name: 2000pods_200nodes
    params:
      # In this testcase, the driver and scheduler must
      # truncate the PotentialNodes and UnsuitableNodes
      # slices.
      nodesWithDRA: 200
      nodesWithoutDRA: 0
      initPods: 1000
      measurePods: 1000
      maxClaimsPerNode: 20

@@ -210,7 +210,7 @@ func (op *createResourceDriverOp) run(ctx context.Context, tb testing.TB, client
		}
	}
-	controller := draapp.NewController(clientset, "test-driver.cdi.k8s.io", resources)
+	controller := draapp.NewController(clientset, op.DriverName, resources)
	ctx, cancel := context.WithCancel(ctx)
	var wg sync.WaitGroup
	wg.Add(1)
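
The one-line Go change above is what enables the new test case: instead of always registering the hard-coded test-driver.cdi.k8s.io, the mock controller is now started with the driver name taken from the createResourceDriver entry in the workload template, so the same opcode can appear twice and bring up both drivers. Below is a minimal, self-contained sketch of how such an entry could map onto a Go struct; the struct, field names, and json tags are illustrative assumptions for this note, not the actual createResourceDriverOp definition in scheduler_perf.

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// Illustrative stand-in for scheduler_perf's createResourceDriverOp;
// the real struct lives in test/integration/scheduler_perf and may
// use different field names, tags, and validation.
type createResourceDriverOp struct {
	Opcode                string `json:"opcode"`
	DriverName            string `json:"driverName"`
	Nodes                 string `json:"nodes"`
	MaxClaimsPerNodeParam string `json:"maxClaimsPerNodeParam"`
}

func main() {
	// One createResourceDriver entry from the workloadTemplate above.
	entry := []byte(`
opcode: createResourceDriver
driverName: another-test-driver.cdi.k8s.io
nodes: scheduler-perf-dra-*
maxClaimsPerNodeParam: $maxClaimsPerNode
`)
	var op createResourceDriverOp
	if err := yaml.Unmarshal(entry, &op); err != nil {
		panic(err)
	}
	// With the driver name decoded from the config, the test can start
	// one mock controller per driver, as in the diff above:
	// draapp.NewController(clientset, op.DriverName, resources).
	fmt.Println(op.DriverName)
}

The new SchedulingWithMultipleResourceClaims case can then be selected like any other scheduler_perf workload, for example via a go test -bench filter such as BenchmarkPerfScheduling/SchedulingWithMultipleResourceClaims/fast (assuming the usual scheduler_perf benchmark entry point).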