dra api: rename NodeResourceSlice -> ResourceSlice

While those objects currently only get published by the kubelet for node-local
resources, this could change once network-attached resources are also
supported. Dropping the "Node" prefix enables such a future extension.

The NodeName in ResourceSlice and StructuredResourceHandle then becomes
optional. The kubelet still needs to provide one, and it must match the
kubelet's own node name; otherwise it doesn't have permission to access
ResourceSlice objects.
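
To make the consumer-facing effect concrete, here is a minimal sketch (not part of this commit) that lists ResourceSlice objects with a v1alpha2 client-go clientset and distinguishes node-local slices from ones without a NodeName; the kubeconfig wiring and the output format are assumptions for illustration.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed: a kubeconfig in the default location and a cluster with
	// the resource.k8s.io/v1alpha2 API enabled.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	slices, err := clientset.ResourceV1alpha2().ResourceSlices().List(context.Background(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, slice := range slices.Items {
		// NodeName is optional after this change; an empty value would
		// indicate a slice that is not node-local (e.g. a future
		// network-attached resource).
		if slice.NodeName == "" {
			fmt.Printf("%s (driver %s): not node-local\n", slice.Name, slice.DriverName)
			continue
		}
		fmt.Printf("%s (driver %s): node %s\n", slice.Name, slice.DriverName, slice.NodeName)
	}
}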
Patrick Ohly
2024-03-07 10:14:11 +01:00
parent 42ee56f093
commit 0b6a0d686a
60 changed files with 3868 additions and 3859 deletions


@@ -277,7 +277,7 @@ type dynamicResources struct {
 	podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister
 	claimParametersLister      resourcev1alpha2listers.ResourceClaimParametersLister
 	classParametersLister      resourcev1alpha2listers.ResourceClassParametersLister
-	nodeResourceSliceLister    resourcev1alpha2listers.NodeResourceSliceLister
+	resourceSliceLister        resourcev1alpha2listers.ResourceSliceLister
 	claimNameLookup            *resourceclaim.Lookup
 	// claimAssumeCache enables temporarily storing a newer claim object
@@ -295,7 +295,7 @@ type dynamicResources struct {
 	// assigned to such a claim. Alternatively, claim allocation state
 	// could also get tracked across pod scheduling cycles, but that
 	// - adds complexity (need to carefully sync state with informer events
-	//   for claims and NodeResourceSlices)
+	//   for claims and ResourceSlices)
 	// - would make integration with cluster autoscaler harder because it would need
 	//   to trigger informer callbacks.
 	//
@@ -353,7 +353,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
 		podSchedulingContextLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulingContexts().Lister(),
 		claimParametersLister:      fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaimParameters().Lister(),
 		classParametersLister:      fh.SharedInformerFactory().Resource().V1alpha2().ResourceClassParameters().Lister(),
-		nodeResourceSliceLister:    fh.SharedInformerFactory().Resource().V1alpha2().NodeResourceSlices().Lister(),
+		resourceSliceLister:        fh.SharedInformerFactory().Resource().V1alpha2().ResourceSlices().Lister(),
 		claimNameLookup:            resourceclaim.NewNameLookup(fh.ClientSet()),
 		claimAssumeCache:           volumebinding.NewAssumeCache(logger, fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Informer(), "claim", "", nil),
 	}
@@ -943,7 +943,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 	// problems for using the plugin in the Cluster Autoscaler. If
 	// this step here turns out to be expensive, we may have to
 	// maintain and update state more persistently.
-	resources, err := newResourceModel(logger, pl.nodeResourceSliceLister, pl.claimAssumeCache)
+	resources, err := newResourceModel(logger, pl.resourceSliceLister, pl.claimAssumeCache)
 	if err != nil {
 		return nil, statusError(logger, err)
 	}
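
The newResourceModel function itself is outside this hunk; as a rough sketch of what the renamed lister feeds into, the model could be built by listing all ResourceSlices and indexing the node-local ones by node name. The function name newResourceModelSketch and the map-based return type are assumptions for illustration, not the plugin's actual code.

package dynamicresources

import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/labels"
	resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2"
)

// newResourceModelSketch is a hypothetical stand-in for the plugin's
// newResourceModel: list all ResourceSlices via the renamed lister and
// index the node-local ones by node name for the scheduling cycle.
func newResourceModelSketch(lister resourcev1alpha2listers.ResourceSliceLister) (map[string][]*resourcev1alpha2.ResourceSlice, error) {
	slices, err := lister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	perNode := make(map[string][]*resourcev1alpha2.ResourceSlice, len(slices))
	for _, slice := range slices {
		if slice.NodeName == "" {
			// NodeName is optional after this change; a slice without
			// it would describe resources that are not node-local.
			continue
		}
		perNode[slice.NodeName] = append(perNode[slice.NodeName], slice)
	}
	return perNode, nil
}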