api: dynamic resource allocation API
This adds a new resource.k8s.io API group with v1alpha1 as its version. It contains four new types: resource.ResourceClaim, resource.ResourceClass, resource.ResourceClaimTemplate, and resource.PodScheduling.
parent 5433da0419
commit 5cca60f0b8
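For illustration only (not part of this diff): a minimal sketch of what a claim built against the new group could look like, assuming the external package k8s.io/api/resource/v1alpha1 mirrors the internal fields added below; the object and class names are hypothetical.

package main

import (
    "fmt"

    resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // A claim that defers allocation until a Pod needs it.
    claim := resourcev1alpha1.ResourceClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "gpu-claim", Namespace: "default"},
        Spec: resourcev1alpha1.ResourceClaimSpec{
            ResourceClassName: "example-gpu-class", // hypothetical class name
            AllocationMode:    resourcev1alpha1.AllocationModeWaitForFirstConsumer,
        },
    }
    fmt.Printf("%s/%s uses class %q\n", claim.Namespace, claim.Name, claim.Spec.ResourceClassName)
}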
@@ -283,6 +283,7 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{
    {Group: "flowcontrol.apiserver.k8s.io", Version: "v1beta1"}: {group: 16100, version: 12},
    {Group: "flowcontrol.apiserver.k8s.io", Version: "v1alpha1"}: {group: 16100, version: 9},
    {Group: "internal.apiserver.k8s.io", Version: "v1alpha1"}: {group: 16000, version: 9},
    {Group: "resource.k8s.io", Version: "v1alpha1"}: {group: 15900, version: 9},
    // Append a new group to the end of the list if unsure.
    // You can use min(existing group)-100 as the initial value for a group.
    // Version can be set to 9 (to have space around) for a new group.
@@ -89,6 +89,7 @@ coordination.k8s.io/v1beta1 \
coordination.k8s.io/v1 \
discovery.k8s.io/v1 \
discovery.k8s.io/v1beta1 \
resource.k8s.io/v1alpha1 \
extensions/v1beta1 \
events.k8s.io/v1 \
events.k8s.io/v1beta1 \
@@ -139,6 +139,10 @@ func TestDefaulting(t *testing.T) {
    {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "ClusterRoleBindingList"}: {},
    {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBinding"}: {},
    {Group: "rbac.authorization.k8s.io", Version: "v1", Kind: "RoleBindingList"}: {},
    {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaim"}: {},
    {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimList"}: {},
    {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplate"}: {},
    {Group: "resource.k8s.io", Version: "v1alpha1", Kind: "ResourceClaimTemplateList"}: {},
    {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicy"}: {},
    {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyList"}: {},
    {Group: "admissionregistration.k8s.io", Version: "v1alpha1", Kind: "ValidatingAdmissionPolicyBinding"}: {},
@@ -42,6 +42,7 @@ import (
    networkingfuzzer "k8s.io/kubernetes/pkg/apis/networking/fuzzer"
    policyfuzzer "k8s.io/kubernetes/pkg/apis/policy/fuzzer"
    rbacfuzzer "k8s.io/kubernetes/pkg/apis/rbac/fuzzer"
    resourcefuzzer "k8s.io/kubernetes/pkg/apis/resource/fuzzer"
    schedulingfuzzer "k8s.io/kubernetes/pkg/apis/scheduling/fuzzer"
    storagefuzzer "k8s.io/kubernetes/pkg/apis/storage/fuzzer"
)
@@ -101,6 +102,7 @@ var FuzzerFuncs = fuzzer.MergeFuzzerFuncs(
    autoscalingfuzzer.Funcs,
    rbacfuzzer.Funcs,
    policyfuzzer.Funcs,
    resourcefuzzer.Funcs,
    certificatesfuzzer.Funcs,
    admissionregistrationfuzzer.Funcs,
    storagefuzzer.Funcs,
@@ -37,6 +37,7 @@ import (
    _ "k8s.io/kubernetes/pkg/apis/node/install"
    _ "k8s.io/kubernetes/pkg/apis/policy/install"
    _ "k8s.io/kubernetes/pkg/apis/rbac/install"
    _ "k8s.io/kubernetes/pkg/apis/resource/install"
    _ "k8s.io/kubernetes/pkg/apis/scheduling/install"
    _ "k8s.io/kubernetes/pkg/apis/storage/install"
)
6  pkg/apis/resource/OWNERS  Normal file
@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- bart0sh
- klueska
- pohly
21  pkg/apis/resource/doc.go  Normal file
@@ -0,0 +1,21 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package

// Package resource contains the latest (or "internal") version of the
// Kubernetes resource API objects.
package resource // import "k8s.io/kubernetes/pkg/apis/resource"
40  pkg/apis/resource/fuzzer/fuzzer.go  Normal file
@@ -0,0 +1,40 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fuzzer

import (
    fuzz "github.com/google/gofuzz"

    runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
    "k8s.io/kubernetes/pkg/apis/resource"
)

// Funcs contains the fuzzer functions for the resource group.
var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
    return []interface{}{
        func(obj *resource.ResourceClaimSpec, c fuzz.Continue) {
            c.FuzzNoCustom(obj) // fuzz self without calling this function again

            // Custom fuzzing for allocation mode: pick one valid mode randomly.
            modes := []resource.AllocationMode{
                resource.AllocationModeImmediate,
                resource.AllocationModeWaitForFirstConsumer,
            }
            obj.AllocationMode = modes[c.Rand.Intn(len(modes))]
        },
    }
}
38  pkg/apis/resource/install/install.go  Normal file
@@ -0,0 +1,38 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package install installs the resource API, making it available as an
// option to all of the API encoding/decoding machinery.
package install

import (
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/resource"
    "k8s.io/kubernetes/pkg/apis/resource/v1alpha1"
)

func init() {
    Install(legacyscheme.Scheme)
}

// Install registers the API group and adds types to a scheme
func Install(scheme *runtime.Scheme) {
    utilruntime.Must(resource.AddToScheme(scheme))
    utilruntime.Must(v1alpha1.AddToScheme(scheme))
    utilruntime.Must(scheme.SetVersionPriority(v1alpha1.SchemeGroupVersion))
}
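A minimal sketch (not part of the commit) of wiring the new group into a standalone scheme via the Install helper above; the install_test.go below covers the legacyscheme path, this only illustrates the standalone case.

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    resourceinstall "k8s.io/kubernetes/pkg/apis/resource/install"
)

func main() {
    scheme := runtime.NewScheme()
    resourceinstall.Install(scheme)
    // Both the internal version and v1alpha1 are now known to the scheme.
    for gvk := range scheme.AllKnownTypes() {
        if gvk.Group == "resource.k8s.io" {
            fmt.Println(gvk)
        }
    }
}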
75  pkg/apis/resource/install/install_test.go  Normal file
@@ -0,0 +1,75 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package install

import (
    "encoding/json"
    "reflect"
    "testing"

    "k8s.io/apimachinery/pkg/api/meta"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    internal "k8s.io/kubernetes/pkg/apis/resource"
)

func TestResourceVersioner(t *testing.T) {
    claim := internal.ResourceClaim{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "10"}}
    version, err := meta.NewAccessor().ResourceVersion(&claim)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if version != "10" {
        t.Errorf("unexpected version %v", version)
    }

    claimList := internal.ResourceClaimList{ListMeta: metav1.ListMeta{ResourceVersion: "10"}}
    version, err = meta.NewAccessor().ResourceVersion(&claimList)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if version != "10" {
        t.Errorf("unexpected version %v", version)
    }
}

func TestCodec(t *testing.T) {
    claim := internal.ResourceClaim{}
    data, err := runtime.Encode(legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: "resource.k8s.io", Version: "v1alpha1"}), &claim)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    other := internal.ResourceClaim{}
    if err := json.Unmarshal(data, &other); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if other.APIVersion != "resource.k8s.io/v1alpha1" || other.Kind != "ResourceClaim" {
        t.Errorf("unexpected unmarshalled object %#v", other)
    }
}

func TestUnversioned(t *testing.T) {
    for _, obj := range []runtime.Object{
        &metav1.Status{},
    } {
        if unversioned, ok := legacyscheme.Scheme.IsUnversioned(obj); !unversioned || !ok {
            t.Errorf("%v is expected to be unversioned", reflect.TypeOf(obj))
        }
    }
}
66  pkg/apis/resource/register.go  Normal file
@@ -0,0 +1,66 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name use in this package
const GroupName = "resource.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
    return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
    // SchemeBuilder object to register various known types
    SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)

    // AddToScheme represents a func that can be used to apply all the registered
    // funcs in a scheme
    AddToScheme = SchemeBuilder.AddToScheme
)

func addKnownTypes(scheme *runtime.Scheme) error {
    if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil {
        return err
    }
    scheme.AddKnownTypes(SchemeGroupVersion,
        &ResourceClass{},
        &ResourceClassList{},
        &ResourceClaim{},
        &ResourceClaimList{},
        &ResourceClaimTemplate{},
        &ResourceClaimTemplateList{},
        &PodScheduling{},
        &PodSchedulingList{},
    )

    return nil
}
404  pkg/apis/resource/types.go  Normal file
@@ -0,0 +1,404 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/apis/core"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceClaim describes which resources are needed by a resource consumer.
// Its status tracks whether the resource has been allocated and what the
// resulting attributes are.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type ResourceClaim struct {
    metav1.TypeMeta
    // Standard object metadata
    // +optional
    metav1.ObjectMeta

    // Spec describes the desired attributes of a resource that then needs
    // to be allocated. It can only be set once when creating the
    // ResourceClaim.
    Spec ResourceClaimSpec

    // Status describes whether the resource is available and with which
    // attributes.
    // +optional
    Status ResourceClaimStatus
}

// ResourceClaimSpec defines how a resource is to be allocated.
type ResourceClaimSpec struct {
    // ResourceClassName references the driver and additional parameters
    // via the name of a ResourceClass that was created as part of the
    // driver deployment.
    ResourceClassName string

    // ParametersRef references a separate object with arbitrary parameters
    // that will be used by the driver when allocating a resource for the
    // claim.
    //
    // The object must be in the same namespace as the ResourceClaim.
    // +optional
    ParametersRef *ResourceClaimParametersReference

    // Allocation can start immediately or when a Pod wants to use the
    // resource. "WaitForFirstConsumer" is the default.
    // +optional
    AllocationMode AllocationMode
}

// AllocationMode describes whether a ResourceClaim gets allocated immediately
// when it gets created (AllocationModeImmediate) or whether allocation is
// delayed until it is needed for a Pod
// (AllocationModeWaitForFirstConsumer). Other modes might get added in the
// future.
type AllocationMode string

const (
    // When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is
    // delayed until a Pod gets scheduled that needs the ResourceClaim. The
    // scheduler will consider all resource requirements of that Pod and
    // trigger allocation for a node that fits the Pod.
    AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer"

    // When a ResourceClaim has AllocationModeImmediate, allocation starts
    // as soon as the ResourceClaim gets created. This is done without
    // considering the needs of Pods that will use the ResourceClaim
    // because those Pods are not known yet.
    AllocationModeImmediate AllocationMode = "Immediate"
)

// ResourceClaimStatus tracks whether the resource has been allocated and what
// the resulting attributes are.
type ResourceClaimStatus struct {
    // DriverName is a copy of the driver name from the ResourceClass at
    // the time when allocation started.
    // +optional
    DriverName string

    // Allocation is set by the resource driver once a resource has been
    // allocated successfully. If this is not specified, the resource is
    // not yet allocated.
    // +optional
    Allocation *AllocationResult

    // ReservedFor indicates which entities are currently allowed to use
    // the claim. A Pod which references a ResourceClaim which is not
    // reserved for that Pod will not be started.
    //
    // There can be at most 32 such reservations. This may get increased in
    // the future, but not reduced.
    // +optional
    ReservedFor []ResourceClaimConsumerReference

    // DeallocationRequested indicates that a ResourceClaim is to be
    // deallocated.
    //
    // The driver then must deallocate this claim and reset the field
    // together with clearing the Allocation field.
    //
    // While DeallocationRequested is set, no new consumers may be added to
    // ReservedFor.
    // +optional
    DeallocationRequested bool
}

// ResourceClaimReservedForMaxSize is the maximum number of entries in
// claim.status.reservedFor.
const ResourceClaimReservedForMaxSize = 32

// AllocationResult contains attributes of an allocated resource.
type AllocationResult struct {
    // ResourceHandle contains arbitrary data returned by the driver after a
    // successful allocation. This is opaque for
    // Kubernetes. Driver documentation may explain to users how to
    // interpret this data if needed.
    //
    // The maximum size of this field is 16KiB. This may get
    // increased in the future, but not reduced.
    // +optional
    ResourceHandle string

    // This field will get set by the resource driver after it has
    // allocated the resource to inform the scheduler where it can
    // schedule Pods using the ResourceClaim.
    //
    // Setting this field is optional. If null, the resource is available
    // everywhere.
    // +optional
    AvailableOnNodes *core.NodeSelector

    // Shareable determines whether the resource supports more
    // than one consumer at a time.
    // +optional
    Shareable bool
}

// ResourceHandleMaxSize is the maximum size of allocation.resourceHandle.
const ResourceHandleMaxSize = 16 * 1024

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceClaimList is a collection of claims.
type ResourceClaimList struct {
    metav1.TypeMeta
    // Standard list metadata
    // +optional
    metav1.ListMeta

    // Items is the list of resource claims.
    Items []ResourceClaim
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodScheduling objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type PodScheduling struct {
    metav1.TypeMeta
    // Standard object metadata
    // +optional
    metav1.ObjectMeta

    // Spec describes where resources for the Pod are needed.
    Spec PodSchedulingSpec

    // Status describes where resources for the Pod can be allocated.
    Status PodSchedulingStatus
}

// PodSchedulingSpec describes where resources for the Pod are needed.
type PodSchedulingSpec struct {
    // SelectedNode is the node for which allocation of ResourceClaims that
    // are referenced by the Pod and that use "WaitForFirstConsumer"
    // allocation is to be attempted.
    SelectedNode string

    // PotentialNodes lists nodes where the Pod might be able to run.
    //
    // The size of this field is limited to 128. This is large enough for
    // many clusters. Larger clusters may need more attempts to find a node
    // that suits all pending resources. This may get increased in the
    // future, but not reduced.
    // +optional
    PotentialNodes []string
}

// PodSchedulingStatus describes where resources for the Pod can be allocated.
type PodSchedulingStatus struct {
    // ResourceClaims describes resource availability for each
    // pod.spec.resourceClaim entry where the corresponding ResourceClaim
    // uses "WaitForFirstConsumer" allocation mode.
    // +optional
    ResourceClaims []ResourceClaimSchedulingStatus

    // If there ever is a need to support other kinds of resources
    // than ResourceClaim, then new fields could get added here
    // for those other resources.
}

// ResourceClaimSchedulingStatus contains information about one particular
// ResourceClaim with "WaitForFirstConsumer" allocation mode.
type ResourceClaimSchedulingStatus struct {
    // Name matches the pod.spec.resourceClaims[*].Name field.
    Name string

    // UnsuitableNodes lists nodes that the ResourceClaim cannot be
    // allocated for.
    //
    // The size of this field is limited to 128, the same as for
    // PodSchedulingSpec.PotentialNodes. This may get increased in the
    // future, but not reduced.
    // +optional
    UnsuitableNodes []string
}

// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
// node lists that are stored in PodScheduling objects. This limit is part
// of the API.
const PodSchedulingNodeListMaxSize = 128

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// PodSchedulingList is a collection of Pod scheduling objects.
type PodSchedulingList struct {
    metav1.TypeMeta
    // Standard list metadata
    // +optional
    metav1.ListMeta

    // Items is the list of PodScheduling objects.
    Items []PodScheduling
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceClass is used by administrators to influence how resources
// are allocated.
//
// This is an alpha type and requires enabling the DynamicResourceAllocation
// feature gate.
type ResourceClass struct {
    metav1.TypeMeta
    // Standard object metadata
    // +optional
    metav1.ObjectMeta

    // DriverName defines the name of the dynamic resource driver that is
    // used for allocation of a ResourceClaim that uses this class.
    //
    // Resource drivers have a unique name in forward domain order
    // (acme.example.com).
    DriverName string

    // ParametersRef references an arbitrary separate object that may hold
    // parameters that will be used by the driver when allocating a
    // resource that uses this class. A dynamic resource driver can
    // distinguish between parameters stored here and those stored in
    // ResourceClaimSpec.
    // +optional
    ParametersRef *ResourceClassParametersReference

    // Only nodes matching the selector will be considered by the scheduler
    // when trying to find a Node that fits a Pod when that Pod uses
    // a ResourceClaim that has not been allocated yet.
    //
    // Setting this field is optional. If null, all nodes are candidates.
    // +optional
    SuitableNodes *core.NodeSelector
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceClassList is a collection of classes.
type ResourceClassList struct {
    metav1.TypeMeta
    // Standard list metadata
    // +optional
    metav1.ListMeta

    // Items is the list of resource classes.
    Items []ResourceClass
}

// ResourceClassParametersReference contains enough information to let you
// locate the parameters for a ResourceClass.
type ResourceClassParametersReference struct {
    // APIGroup is the group for the resource being referenced. It is
    // empty for the core API. This matches the group in the APIVersion
    // that is used when creating the resources.
    // +optional
    APIGroup string
    // Kind is the type of resource being referenced. This is the same
    // value as in the parameter object's metadata.
    Kind string
    // Name is the name of resource being referenced.
    Name string
    // Namespace that contains the referenced resource. Must be empty
    // for cluster-scoped resources and non-empty for namespaced
    // resources.
    // +optional
    Namespace string
}

// ResourceClaimParametersReference contains enough information to let you
// locate the parameters for a ResourceClaim. The object must be in the same
// namespace as the ResourceClaim.
type ResourceClaimParametersReference struct {
    // APIGroup is the group for the resource being referenced. It is
    // empty for the core API. This matches the group in the APIVersion
    // that is used when creating the resources.
    // +optional
    APIGroup string
    // Kind is the type of resource being referenced. This is the same
    // value as in the parameter object's metadata, for example "ConfigMap".
    Kind string
    // Name is the name of resource being referenced.
    Name string
}

// ResourceClaimConsumerReference contains enough information to let you
// locate the consumer of a ResourceClaim. The user must be a resource in the same
// namespace as the ResourceClaim.
type ResourceClaimConsumerReference struct {
    // APIGroup is the group for the resource being referenced. It is
    // empty for the core API. This matches the group in the APIVersion
    // that is used when creating the resources.
    // +optional
    APIGroup string
    // Resource is the type of resource being referenced, for example "pods".
    Resource string
    // Name is the name of resource being referenced.
    Name string
    // UID identifies exactly one incarnation of the resource.
    UID types.UID
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceClaimTemplate is used to produce ResourceClaim objects.
type ResourceClaimTemplate struct {
    metav1.TypeMeta
    // Standard object metadata
    // +optional
    metav1.ObjectMeta

    // Describes the ResourceClaim that is to be generated.
    //
    // This field is immutable. A ResourceClaim will get created by the
    // control plane for a Pod when needed and then not get updated
    // anymore.
    Spec ResourceClaimTemplateSpec
}

// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
type ResourceClaimTemplateSpec struct {
    // ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim
    // when creating it. No other fields are allowed and will be rejected during
    // validation.
    // +optional
    metav1.ObjectMeta

    // Spec for the ResourceClaim. The entire content is copied unchanged
    // into the ResourceClaim that gets created from this template. The
    // same fields as in a ResourceClaim are also valid here.
    Spec ResourceClaimSpec
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ResourceClaimTemplateList is a collection of claim templates.
type ResourceClaimTemplateList struct {
    metav1.TypeMeta
    // Standard list metadata
    // +optional
    metav1.ListMeta

    // Items is the list of resource claim templates.
    Items []ResourceClaimTemplate
}
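Illustrative only (not part of this diff): how the internal types defined above relate to each other — a class names a driver, a claim references the class by name, and the claim status records the allocation plus at most 32 consumer reservations. All object, driver, and pod names are hypothetical.

package resourceexample

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/kubernetes/pkg/apis/resource"
)

func exampleObjects() (resource.ResourceClass, resource.ResourceClaim) {
    // A class deployed by an administrator; it selects the driver.
    class := resource.ResourceClass{
        ObjectMeta: metav1.ObjectMeta{Name: "example-class"}, // hypothetical name
        DriverName: "gpu.resource.example.com",               // hypothetical driver
    }
    // A claim that references the class and, in its status, an allocation
    // reserved for one consuming Pod.
    claim := resource.ResourceClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "example-claim", Namespace: "default"},
        Spec: resource.ResourceClaimSpec{
            ResourceClassName: class.Name,
            AllocationMode:    resource.AllocationModeImmediate,
        },
        Status: resource.ResourceClaimStatus{
            DriverName: class.DriverName,
            Allocation: &resource.AllocationResult{Shareable: true},
            ReservedFor: []resource.ResourceClaimConsumerReference{
                {Resource: "pods", Name: "example-pod", UID: types.UID("1234")},
            },
        },
    }
    return class, claim
}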
25  pkg/apis/resource/v1alpha1/conversion.go  Normal file
@@ -0,0 +1,25 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "k8s.io/apimachinery/pkg/runtime"
)

func addConversionFuncs(scheme *runtime.Scheme) error {
    return nil
}
32  pkg/apis/resource/v1alpha1/defaults.go  Normal file
@@ -0,0 +1,32 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "k8s.io/api/resource/v1alpha1"
    "k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
    return RegisterDefaults(scheme)
}

func SetDefaults_ResourceClaimSpec(obj *v1alpha1.ResourceClaimSpec) {
    if obj.AllocationMode == "" {
        obj.AllocationMode = v1alpha1.AllocationModeWaitForFirstConsumer
    }
}
75  pkg/apis/resource/v1alpha1/defaults_test.go  Normal file
@@ -0,0 +1,75 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1_test

import (
    "reflect"
    "testing"

    v1alpha1 "k8s.io/api/resource/v1alpha1"
    "k8s.io/apimachinery/pkg/runtime"

    // ensure types are installed
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    _ "k8s.io/kubernetes/pkg/apis/resource/install"
)

func TestSetDefaultAllocationMode(t *testing.T) {
    claim := &v1alpha1.ResourceClaim{}

    // field should be defaulted
    defaultMode := v1alpha1.AllocationModeWaitForFirstConsumer
    output := roundTrip(t, runtime.Object(claim)).(*v1alpha1.ResourceClaim)
    outMode := output.Spec.AllocationMode
    if outMode != defaultMode {
        t.Errorf("Expected AllocationMode to be defaulted to: %+v, got: %+v", defaultMode, outMode)
    }

    // field should not change
    nonDefaultMode := v1alpha1.AllocationModeImmediate
    claim = &v1alpha1.ResourceClaim{
        Spec: v1alpha1.ResourceClaimSpec{
            AllocationMode: nonDefaultMode,
        },
    }
    output = roundTrip(t, runtime.Object(claim)).(*v1alpha1.ResourceClaim)
    outMode = output.Spec.AllocationMode
    if outMode != v1alpha1.AllocationModeImmediate {
        t.Errorf("Expected AllocationMode to remain %+v, got: %+v", nonDefaultMode, outMode)
    }
}

func roundTrip(t *testing.T, obj runtime.Object) runtime.Object {
    codec := legacyscheme.Codecs.LegacyCodec(v1alpha1.SchemeGroupVersion)
    data, err := runtime.Encode(codec, obj)
    if err != nil {
        t.Errorf("%v\n %#v", err, obj)
        return nil
    }
    obj2, err := runtime.Decode(codec, data)
    if err != nil {
        t.Errorf("%v\nData: %s\nSource: %#v", err, string(data), obj)
        return nil
    }
    obj3 := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(runtime.Object)
    err = legacyscheme.Scheme.Convert(obj2, obj3, nil)
    if err != nil {
        t.Errorf("%v\nSource: %#v", err, obj2)
        return nil
    }
    return obj3
}
23  pkg/apis/resource/v1alpha1/doc.go  Normal file
@@ -0,0 +1,23 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/resource
// +k8s:conversion-gen-external-types=k8s.io/api/resource/v1alpha1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=k8s.io/api/resource/v1alpha1

// Package v1alpha1 is the v1alpha1 version of the resource API.
package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/resource/v1alpha1"
46  pkg/apis/resource/v1alpha1/register.go  Normal file
@@ -0,0 +1,46 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
    "k8s.io/api/resource/v1alpha1"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

var (
    localSchemeBuilder = &v1alpha1.SchemeBuilder
    AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
    // We only register manually written functions here. The registration of the
    // generated functions takes place in the generated files. The separation
    // makes the code compile even when the generated files are missing.
    localSchemeBuilder.Register(addDefaultingFuncs, addConversionFuncs)
}

// TODO: remove these global variables
// GroupName is the group name use in this package
const GroupName = "resource.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
    return SchemeGroupVersion.WithResource(resource).GroupResource()
}
317  pkg/apis/resource/validation/validation.go  Normal file
@@ -0,0 +1,317 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
    apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apimachinery/pkg/util/validation/field"
    corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
    "k8s.io/kubernetes/pkg/apis/resource"
)

// validateResourceClaimName can be used to check whether the given
// name for a ResourceClaim is valid.
var validateResourceClaimName = apimachineryvalidation.NameIsDNSSubdomain

// validateResourceClaimTemplateName can be used to check whether the given
// name for a ResourceClaimTemplate is valid.
var validateResourceClaimTemplateName = apimachineryvalidation.NameIsDNSSubdomain

// validateResourceDriverName reuses the validation of a CSI driver because
// the allowed values are exactly the same.
var validateResourceDriverName = corevalidation.ValidateCSIDriverName

// ValidateClaim validates a ResourceClaim.
func ValidateClaim(resourceClaim *resource.ResourceClaim) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, validateResourceClaimName, field.NewPath("metadata"))
    allErrs = append(allErrs, validateResourceClaimSpec(&resourceClaim.Spec, field.NewPath("spec"))...)
    return allErrs
}

func validateResourceClaimSpec(spec *resource.ResourceClaimSpec, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    for _, msg := range corevalidation.ValidateClassName(spec.ResourceClassName, false) {
        allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClassName"), spec.ResourceClassName, msg))
    }
    allErrs = append(allErrs, validateResourceClaimParameters(spec.ParametersRef, fldPath.Child("parametersRef"))...)
    if !supportedAllocationModes.Has(string(spec.AllocationMode)) {
        allErrs = append(allErrs, field.NotSupported(fldPath.Child("allocationMode"), spec.AllocationMode, supportedAllocationModes.List()))
    }
    return allErrs
}

var supportedAllocationModes = sets.NewString(string(resource.AllocationModeImmediate), string(resource.AllocationModeWaitForFirstConsumer))

// It would have been nice to use Go generics to reuse the same validation
// function for Kind and Name in both types, but generics cannot be used to
// access common fields in structs.

func validateResourceClaimParameters(ref *resource.ResourceClaimParametersReference, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    if ref != nil {
        if ref.Kind == "" {
            allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
        }
        if ref.Name == "" {
            allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
        }
    }
    return allErrs
}

func validateClassParameters(ref *resource.ResourceClassParametersReference, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    if ref != nil {
        if ref.Kind == "" {
            allErrs = append(allErrs, field.Required(fldPath.Child("kind"), ""))
        }
        if ref.Name == "" {
            allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
        }
        if ref.Namespace != "" {
            for _, msg := range apimachineryvalidation.ValidateNamespaceName(ref.Namespace, false) {
                allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), ref.Namespace, msg))
            }
        }
    }
    return allErrs
}

// ValidateClass validates a ResourceClass.
func ValidateClass(resourceClass *resource.ResourceClass) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMeta(&resourceClass.ObjectMeta, false, corevalidation.ValidateClassName, field.NewPath("metadata"))
    allErrs = append(allErrs, validateResourceDriverName(resourceClass.DriverName, field.NewPath("driverName"))...)
    allErrs = append(allErrs, validateClassParameters(resourceClass.ParametersRef, field.NewPath("parametersRef"))...)
    if resourceClass.SuitableNodes != nil {
        allErrs = append(allErrs, corevalidation.ValidateNodeSelector(resourceClass.SuitableNodes, field.NewPath("suitableNodes"))...)
    }

    return allErrs
}

// ValidateClassUpdate tests if an update to ResourceClass is valid.
func ValidateClassUpdate(resourceClass, oldClass *resource.ResourceClass) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClass.ObjectMeta, &oldClass.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidateClass(resourceClass)...)
    return allErrs
}

// ValidateClaimUpdate tests if an update to ResourceClaim is valid.
func ValidateClaimUpdate(resourceClaim, oldClaim *resource.ResourceClaim) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldClaim.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(resourceClaim.Spec, oldClaim.Spec, field.NewPath("spec"))...)
    allErrs = append(allErrs, ValidateClaim(resourceClaim)...)
    return allErrs
}

// ValidateClaimStatusUpdate tests if an update to the status of a ResourceClaim is valid.
func ValidateClaimStatusUpdate(resourceClaim, oldClaim *resource.ResourceClaim) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldClaim.ObjectMeta, field.NewPath("metadata"))
    fldPath := field.NewPath("status")
    // The name might not be set yet.
    if resourceClaim.Status.DriverName != "" {
        allErrs = append(allErrs, validateResourceDriverName(resourceClaim.Status.DriverName, fldPath.Child("driverName"))...)
    } else if resourceClaim.Status.Allocation != nil {
        allErrs = append(allErrs, field.Required(fldPath.Child("driverName"), "must be specified when `allocation` is set"))
    }

    allErrs = append(allErrs, validateAllocationResult(resourceClaim.Status.Allocation, fldPath.Child("allocation"))...)
    allErrs = append(allErrs, validateSliceIsASet(resourceClaim.Status.ReservedFor, resource.ResourceClaimReservedForMaxSize,
        validateResourceClaimUserReference, fldPath.Child("reservedFor"))...)

    // Now check for invariants that must be valid for a ResourceClaim.
    if len(resourceClaim.Status.ReservedFor) > 0 {
        if resourceClaim.Status.Allocation == nil {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("reservedFor"), "may not be specified when `allocated` is not set"))
        } else {
            if !resourceClaim.Status.Allocation.Shareable && len(resourceClaim.Status.ReservedFor) > 1 {
                allErrs = append(allErrs, field.Forbidden(fldPath.Child("reservedFor"), "may not be reserved more than once"))
            }
            // Items may be removed from ReservedFor while the claim is meant to be deallocated,
            // but not added.
            if resourceClaim.DeletionTimestamp != nil || resourceClaim.Status.DeallocationRequested {
                oldSet := sets.New(oldClaim.Status.ReservedFor...)
                newSet := sets.New(resourceClaim.Status.ReservedFor...)
                newItems := newSet.Difference(oldSet)
                if len(newItems) > 0 {
                    allErrs = append(allErrs, field.Forbidden(fldPath.Child("reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set"))
                }
            }
        }
    }

    if !oldClaim.Status.DeallocationRequested &&
        resourceClaim.Status.DeallocationRequested &&
        len(resourceClaim.Status.ReservedFor) > 0 {
        allErrs = append(allErrs, field.Forbidden(fldPath.Child("deallocationRequested"), "deallocation cannot be requested while `reservedFor` is set"))
    }

    if resourceClaim.Status.Allocation == nil &&
        resourceClaim.Status.DeallocationRequested {
        // Either one or the other field was modified incorrectly.
        // For the sake of simplicity this only reports the invalid
        // end result.
        allErrs = append(allErrs, field.Forbidden(fldPath, "`allocation` must be set when `deallocationRequested` is set"))
    }

    // Once deallocation has been requested, that request cannot be removed
    // anymore because the deallocation may already have started. The field
    // can only get reset by the driver together with removing the
    // allocation.
    if oldClaim.Status.DeallocationRequested &&
        !resourceClaim.Status.DeallocationRequested &&
        resourceClaim.Status.Allocation != nil {
        allErrs = append(allErrs, field.Forbidden(fldPath.Child("deallocationRequested"), "may not be cleared when `allocation` is set"))
    }

    return allErrs
}

func validateAllocationResult(allocation *resource.AllocationResult, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    if allocation != nil {
        if len(allocation.ResourceHandle) > resource.ResourceHandleMaxSize {
            allErrs = append(allErrs, field.TooLongMaxLength(fldPath.Child("resourceHandle"), len(allocation.ResourceHandle), resource.ResourceHandleMaxSize))
        }
        if allocation.AvailableOnNodes != nil {
            allErrs = append(allErrs, corevalidation.ValidateNodeSelector(allocation.AvailableOnNodes, fldPath.Child("availableOnNodes"))...)
        }
    }
    return allErrs
}

func validateResourceClaimUserReference(ref resource.ResourceClaimConsumerReference, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    if ref.Resource == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
    }
    if ref.Name == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
    }
    if ref.UID == "" {
        allErrs = append(allErrs, field.Required(fldPath.Child("uid"), ""))
    }
    return allErrs
}

// validateSliceIsASet ensures that a slice contains no duplicates and does not exceed a certain maximum size.
func validateSliceIsASet[T comparable](slice []T, maxSize int, validateItem func(item T, fldPath *field.Path) field.ErrorList, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    allItems := sets.New[T]()
    for i, item := range slice {
        idxPath := fldPath.Index(i)
        if allItems.Has(item) {
            allErrs = append(allErrs, field.Duplicate(idxPath, item))
        } else {
            allErrs = append(allErrs, validateItem(item, idxPath)...)
            allItems.Insert(item)
        }
    }
    if len(slice) > maxSize {
        // Dumping the entire field into the error message is likely to be too long,
        // in particular when it is already beyond the maximum size. Instead this
        // just shows the number of entries.
        allErrs = append(allErrs, field.TooLongMaxLength(fldPath, len(slice), maxSize))
    }
    return allErrs
}

// ValidatePodScheduling validates a PodScheduling.
func ValidatePodScheduling(resourceClaim *resource.PodScheduling) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMeta(&resourceClaim.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata"))
    allErrs = append(allErrs, validatePodSchedulingSpec(&resourceClaim.Spec, field.NewPath("spec"))...)
    return allErrs
}

func validatePodSchedulingSpec(spec *resource.PodSchedulingSpec, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    // Checking PotentialNodes for duplicates is intentionally not done. It
    // could be fairly expensive and the only component which normally has
    // permissions to set this field, kube-scheduler, is a trusted
    // component. Also, if it gets this wrong because of a bug, then the
    // effect is limited (same semantic).
    if len(spec.PotentialNodes) > resource.PodSchedulingNodeListMaxSize {
        allErrs = append(allErrs, field.TooLongMaxLength(fldPath.Child("potentialNodes"), nil, resource.PodSchedulingNodeListMaxSize))
    }
    return allErrs
}

// ValidatePodSchedulingUpdate tests if an update to PodScheduling is valid.
func ValidatePodSchedulingUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, ValidatePodScheduling(resourceClaim)...)
    return allErrs
}

// ValidatePodSchedulingStatusUpdate tests if an update to the status of a PodScheduling is valid.
func ValidatePodSchedulingStatusUpdate(resourceClaim, oldPodScheduling *resource.PodScheduling) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaim.ObjectMeta, &oldPodScheduling.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, validatePodSchedulingStatus(&resourceClaim.Status, field.NewPath("status"))...)
    return allErrs
}

func validatePodSchedulingStatus(status *resource.PodSchedulingStatus, fldPath *field.Path) field.ErrorList {
    return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims"))
}

func validatePodSchedulingClaims(claimStatuses []resource.ResourceClaimSchedulingStatus, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    names := sets.NewString()
    for i, claimStatus := range claimStatuses {
        allErrs = append(allErrs, validatePodSchedulingClaim(claimStatus, fldPath.Index(i))...)
        if names.Has(claimStatus.Name) {
            allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), claimStatus.Name))
        } else {
            names.Insert(claimStatus.Name)
        }
    }
    return allErrs
}

func validatePodSchedulingClaim(claim resource.ResourceClaimSchedulingStatus, fldPath *field.Path) field.ErrorList {
    var allErrs field.ErrorList
    // Checking UnsuitableNodes for duplicates is intentionally not done. It
    // could be fairly expensive and if a resource driver gets this wrong,
    // then it is only going to have a negative effect for the pods relying
    // on this driver.
    if len(claim.UnsuitableNodes) > resource.PodSchedulingNodeListMaxSize {
        allErrs = append(allErrs, field.TooLongMaxLength(fldPath.Child("unsuitableNodes"), nil, resource.PodSchedulingNodeListMaxSize))
    }
    return allErrs
}

// ValidateClaimTemplate validates a ResourceClaimTemplate.
func ValidateClaimTemplate(template *resource.ResourceClaimTemplate) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMeta(&template.ObjectMeta, true, validateResourceClaimTemplateName, field.NewPath("metadata"))
    allErrs = append(allErrs, validateResourceClaimTemplateSpec(&template.Spec, field.NewPath("spec"))...)
    return allErrs
}

func validateResourceClaimTemplateSpec(spec *resource.ResourceClaimTemplateSpec, fldPath *field.Path) field.ErrorList {
    allErrs := corevalidation.ValidateTemplateObjectMeta(&spec.ObjectMeta, fldPath.Child("metadata"))
    allErrs = append(allErrs, validateResourceClaimSpec(&spec.Spec, fldPath.Child("spec"))...)
    return allErrs
}

// ValidateClaimTemplateUpdate tests if an update to template is valid.
func ValidateClaimTemplateUpdate(template, oldTemplate *resource.ResourceClaimTemplate) field.ErrorList {
    allErrs := corevalidation.ValidateObjectMetaUpdate(&template.ObjectMeta, &oldTemplate.ObjectMeta, field.NewPath("metadata"))
    allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(template.Spec, oldTemplate.Spec, field.NewPath("spec"))...)
    allErrs = append(allErrs, ValidateClaimTemplate(template)...)
    return allErrs
}
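A short sketch (not part of the commit) of exercising the validation added above: a claim with an empty resource class name and an unknown allocation mode should produce field errors for spec.resourceClassName and spec.allocationMode. The object names are hypothetical.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/pkg/apis/resource"
    "k8s.io/kubernetes/pkg/apis/resource/validation"
)

func main() {
    claim := &resource.ResourceClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "bad-claim", Namespace: "default"},
        Spec: resource.ResourceClaimSpec{
            // ResourceClassName intentionally left empty.
            AllocationMode: resource.AllocationMode("Eventually"), // not a supported mode
        },
    }
    // Print the field errors reported by the new ValidateClaim helper.
    for _, err := range validation.ValidateClaim(claim) {
        fmt.Println(err)
    }
}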
312  pkg/apis/resource/validation/validation_podscheduling_test.go  Normal file
@@ -0,0 +1,312 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package validation

import (
    "fmt"
    "testing"

    "github.com/stretchr/testify/assert"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/kubernetes/pkg/apis/resource"
    "k8s.io/utils/pointer"
)

func testPodScheduling(name, namespace string, spec resource.PodSchedulingSpec) *resource.PodScheduling {
    return &resource.PodScheduling{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: namespace,
        },
        Spec: spec,
    }
}

func TestValidatePodScheduling(t *testing.T) {
    goodName := "foo"
    goodNS := "ns"
    goodPodSchedulingSpec := resource.PodSchedulingSpec{}
    now := metav1.Now()
    badName := "!@#$%^"
    badValue := "spaces not allowed"

    scenarios := map[string]struct {
        scheduling   *resource.PodScheduling
        wantFailures field.ErrorList
    }{
        "good-scheduling": {
            scheduling: testPodScheduling(goodName, goodNS, goodPodSchedulingSpec),
        },
        "missing-name": {
            wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
            scheduling:   testPodScheduling("", goodNS, goodPodSchedulingSpec),
        },
        "bad-name": {
            wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
            scheduling:   testPodScheduling(badName, goodNS, goodPodSchedulingSpec),
        },
        "missing-namespace": {
            wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
            scheduling:   testPodScheduling(goodName, "", goodPodSchedulingSpec),
        },
        "generate-name": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.GenerateName = "pvc-"
                return scheduling
            }(),
        },
        "uid": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
                return scheduling
            }(),
        },
        "resource-version": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.ResourceVersion = "1"
                return scheduling
            }(),
        },
        "generation": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.Generation = 100
                return scheduling
            }(),
        },
        "creation-timestamp": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.CreationTimestamp = now
                return scheduling
            }(),
        },
        "deletion-grace-period-seconds": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.DeletionGracePeriodSeconds = pointer.Int64(10)
                return scheduling
            }(),
        },
        "owner-references": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.OwnerReferences = []metav1.OwnerReference{
                    {
                        APIVersion: "v1",
                        Kind:       "pod",
                        Name:       "foo",
                        UID:        "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
                    },
                }
                return scheduling
            }(),
        },
        "finalizers": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
                scheduling.Finalizers = []string{
                    "example.com/foo",
                }
                return scheduling
            }(),
        },
        "managed-fields": {
            scheduling: func() *resource.PodScheduling {
                scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"good-labels": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"bad-labels": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Labels = map[string]string{
|
||||
"hello-world": badValue,
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"good-annotations": {
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Annotations = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
"bad-annotations": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
|
||||
scheduling: func() *resource.PodScheduling {
|
||||
scheduling := testPodScheduling(goodName, goodNS, goodPodSchedulingSpec)
|
||||
scheduling.Annotations = map[string]string{
|
||||
badName: "hello world",
|
||||
}
|
||||
return scheduling
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
errs := ValidatePodScheduling(scenario.scheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingUpdate(t *testing.T) {
|
||||
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldScheduling *resource.PodScheduling
|
||||
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
|
||||
},
|
||||
"add-selected-node": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Spec.SelectedNode = "worker1"
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"add-potential-nodes": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
|
||||
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-potential-nodes": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), nil, resource.PodSchedulingNodeListMaxSize)},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
|
||||
scheduling.Spec.PotentialNodes = append(scheduling.Spec.PotentialNodes, fmt.Sprintf("worker%d", i))
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldScheduling.ResourceVersion = "1"
|
||||
errs := ValidatePodSchedulingUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePodSchedulingStatusUpdate(t *testing.T) {
|
||||
validScheduling := testPodScheduling("foo", "ns", resource.PodSchedulingSpec{})
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldScheduling *resource.PodScheduling
|
||||
update func(scheduling *resource.PodScheduling) *resource.PodScheduling
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling { return scheduling },
|
||||
},
|
||||
"add-claim-status": {
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ {
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
fmt.Sprintf("worker%d", i),
|
||||
)
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-duplicated-claim-status": {
|
||||
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
for i := 0; i < 2; i++ {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{Name: "my-claim"},
|
||||
)
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
"invalid-too-long-claim-status": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), nil, resource.PodSchedulingNodeListMaxSize)},
|
||||
oldScheduling: validScheduling,
|
||||
update: func(scheduling *resource.PodScheduling) *resource.PodScheduling {
|
||||
scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
|
||||
resource.ResourceClaimSchedulingStatus{
|
||||
Name: "my-claim",
|
||||
},
|
||||
)
|
||||
for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ {
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(
|
||||
scheduling.Status.ResourceClaims[0].UnsuitableNodes,
|
||||
fmt.Sprintf("worker%d", i),
|
||||
)
|
||||
}
|
||||
return scheduling
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldScheduling.ResourceVersion = "1"
|
||||
errs := ValidatePodSchedulingStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
629
pkg/apis/resource/validation/validation_resourceclaim_test.go
Normal file
629
pkg/apis/resource/validation/validation_resourceclaim_test.go
Normal file
@ -0,0 +1,629 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func testClaim(name, namespace string, spec resource.ResourceClaimSpec) *resource.ResourceClaim {
|
||||
return &resource.ResourceClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: spec,
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClaim(t *testing.T) {
|
||||
validMode := resource.AllocationModeImmediate
|
||||
invalidMode := resource.AllocationMode("invalid")
|
||||
goodName := "foo"
|
||||
badName := "!@#$%^"
|
||||
goodNS := "ns"
|
||||
goodClaimSpec := resource.ResourceClaimSpec{
|
||||
ResourceClassName: goodName,
|
||||
AllocationMode: validMode,
|
||||
}
|
||||
now := metav1.Now()
|
||||
badValue := "spaces not allowed"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
claim *resource.ResourceClaim
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"good-claim": {
|
||||
claim: testClaim(goodName, goodNS, goodClaimSpec),
|
||||
},
|
||||
"missing-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
|
||||
claim: testClaim("", goodNS, goodClaimSpec),
|
||||
},
|
||||
"bad-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
claim: testClaim(badName, goodNS, goodClaimSpec),
|
||||
},
|
||||
"missing-namespace": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
|
||||
claim: testClaim(goodName, "", goodClaimSpec),
|
||||
},
|
||||
"generate-name": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.GenerateName = "pvc-"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"uid": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"resource-version": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.ResourceVersion = "1"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"generation": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Generation = 100
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"creation-timestamp": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.CreationTimestamp = now
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"deletion-grace-period-seconds": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.DeletionGracePeriodSeconds = pointer.Int64(10)
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"owner-references": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "pod",
|
||||
Name: "foo",
|
||||
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"finalizers": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Finalizers = []string{
|
||||
"example.com/foo",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"managed-fields": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"good-labels": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"bad-labels": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Labels = map[string]string{
|
||||
"hello-world": badValue,
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"good-annotations": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Annotations = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"bad-annotations": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Annotations = map[string]string{
|
||||
badName: "hello world",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"bad-classname": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "resourceClassName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Spec.ResourceClassName = badName
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"bad-mode": {
|
||||
wantFailures: field.ErrorList{field.NotSupported(field.NewPath("spec", "allocationMode"), invalidMode, supportedAllocationModes.List())},
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Spec.AllocationMode = invalidMode
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"good-parameters": {
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
|
||||
Kind: "foo",
|
||||
Name: "bar",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"missing-parameters-kind": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "parametersRef", "kind"), "")},
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
|
||||
Name: "bar",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"missing-parameters-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "parametersRef", "name"), "")},
|
||||
claim: func() *resource.ResourceClaim {
|
||||
claim := testClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
|
||||
Kind: "foo",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
errs := ValidateClaim(scenario.claim)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClaimUpdate(t *testing.T) {
|
||||
name := "valid"
|
||||
parameters := &resource.ResourceClaimParametersReference{
|
||||
Kind: "foo",
|
||||
Name: "bar",
|
||||
}
|
||||
validClaim := testClaim("foo", "ns", resource.ResourceClaimSpec{
|
||||
ResourceClassName: name,
|
||||
AllocationMode: resource.AllocationModeImmediate,
|
||||
ParametersRef: parameters,
|
||||
})
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldClaim *resource.ResourceClaim
|
||||
update func(claim *resource.ResourceClaim) *resource.ResourceClaim
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { return claim },
|
||||
},
|
||||
"invalid-update-class": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimSpec {
|
||||
spec := validClaim.Spec.DeepCopy()
|
||||
spec.ResourceClassName += "2"
|
||||
return *spec
|
||||
}(), "field is immutable")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Spec.ResourceClassName += "2"
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-update-remove-parameters": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimSpec {
|
||||
spec := validClaim.Spec.DeepCopy()
|
||||
spec.ParametersRef = nil
|
||||
return *spec
|
||||
}(), "field is immutable")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Spec.ParametersRef = nil
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-update-mode": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimSpec {
|
||||
spec := validClaim.Spec.DeepCopy()
|
||||
spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
|
||||
return *spec
|
||||
}(), "field is immutable")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
|
||||
return claim
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldClaim.ResourceVersion = "1"
|
||||
errs := ValidateClaimUpdate(scenario.update(scenario.oldClaim.DeepCopy()), scenario.oldClaim)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClaimStatusUpdate(t *testing.T) {
|
||||
validClaim := testClaim("foo", "ns", resource.ResourceClaimSpec{
|
||||
ResourceClassName: "valid",
|
||||
AllocationMode: resource.AllocationModeImmediate,
|
||||
})
|
||||
|
||||
validAllocatedClaim := validClaim.DeepCopy()
|
||||
validAllocatedClaim.Status = resource.ResourceClaimStatus{
|
||||
DriverName: "valid",
|
||||
Allocation: &resource.AllocationResult{
|
||||
ResourceHandle: strings.Repeat(" ", resource.ResourceHandleMaxSize),
|
||||
Shareable: true,
|
||||
},
|
||||
}
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldClaim *resource.ResourceClaim
|
||||
update func(claim *resource.ResourceClaim) *resource.ResourceClaim
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { return claim },
|
||||
},
|
||||
"add-driver": {
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DriverName = "valid"
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-add-allocation": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "driverName"), "must be specified when `allocation` is set")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
// DriverName must also get set here!
|
||||
claim.Status.Allocation = &resource.AllocationResult{}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"valid-add-allocation": {
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DriverName = "valid"
|
||||
claim.Status.Allocation = &resource.AllocationResult{
|
||||
ResourceHandle: strings.Repeat(" ", resource.ResourceHandleMaxSize),
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-allocation-handle": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "allocation", "resourceHandle"), resource.ResourceHandleMaxSize+1, resource.ResourceHandleMaxSize)},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DriverName = "valid"
|
||||
claim.Status.Allocation = &resource.AllocationResult{
|
||||
ResourceHandle: strings.Repeat(" ", resource.ResourceHandleMaxSize+1),
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-node-selector": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "allocation", "availableOnNodes", "nodeSelectorTerms"), "must have at least one node selector term")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DriverName = "valid"
|
||||
claim.Status.Allocation = &resource.AllocationResult{
|
||||
AvailableOnNodes: &core.NodeSelector{
|
||||
// Must not be empty.
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"add-reservation": {
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
for i := 0; i < resource.ResourceClaimReservedForMaxSize; i++ {
|
||||
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
|
||||
resource.ResourceClaimConsumerReference{
|
||||
Resource: "pods",
|
||||
Name: fmt.Sprintf("foo-%d", i),
|
||||
UID: "1",
|
||||
})
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"add-reservation-and-allocation": {
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status = *validAllocatedClaim.Status.DeepCopy()
|
||||
for i := 0; i < resource.ResourceClaimReservedForMaxSize; i++ {
|
||||
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
|
||||
resource.ResourceClaimConsumerReference{
|
||||
Resource: "pods",
|
||||
Name: fmt.Sprintf("foo-%d", i),
|
||||
UID: "1",
|
||||
})
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-too-large": {
|
||||
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "reservedFor"), resource.ResourceClaimReservedForMaxSize+1, resource.ResourceClaimReservedForMaxSize)},
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
for i := 0; i < resource.ResourceClaimReservedForMaxSize+1; i++ {
|
||||
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
|
||||
resource.ResourceClaimConsumerReference{
|
||||
Resource: "pods",
|
||||
Name: fmt.Sprintf("foo-%d", i),
|
||||
UID: "1",
|
||||
})
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-duplicate": {
|
||||
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "reservedFor").Index(1), resource.ResourceClaimConsumerReference{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
})},
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
for i := 0; i < 2; i++ {
|
||||
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
|
||||
resource.ResourceClaimConsumerReference{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
})
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-not-shared": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "may not be reserved more than once")},
|
||||
oldClaim: func() *resource.ResourceClaim {
|
||||
claim := validAllocatedClaim.DeepCopy()
|
||||
claim.Status.Allocation.Shareable = false
|
||||
return claim
|
||||
}(),
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
for i := 0; i < 2; i++ {
|
||||
claim.Status.ReservedFor = append(claim.Status.ReservedFor,
|
||||
resource.ResourceClaimConsumerReference{
|
||||
Resource: "pods",
|
||||
Name: fmt.Sprintf("foo-%d", i),
|
||||
UID: "1",
|
||||
})
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-no-allocation": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "may not be specified when `allocated` is not set")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DriverName = "valid"
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-no-resource": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "reservedFor").Index(0).Child("resource"), "")},
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-no-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "reservedFor").Index(0).Child("name"), "")},
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Resource: "pods",
|
||||
UID: "1",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-for-no-uid": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("status", "reservedFor").Index(0).Child("uid"), "")},
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-deleted": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set")},
|
||||
oldClaim: func() *resource.ResourceClaim {
|
||||
claim := validAllocatedClaim.DeepCopy()
|
||||
var deletionTimestamp metav1.Time
|
||||
claim.DeletionTimestamp = &deletionTimestamp
|
||||
return claim
|
||||
}(),
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-reserved-deallocation-requested": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set")},
|
||||
oldClaim: func() *resource.ResourceClaim {
|
||||
claim := validAllocatedClaim.DeepCopy()
|
||||
claim.Status.DeallocationRequested = true
|
||||
return claim
|
||||
}(),
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"add-deallocation-requested": {
|
||||
oldClaim: validAllocatedClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DeallocationRequested = true
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-deallocation-requested-removal": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "deallocationRequested"), "may not be cleared when `allocation` is set")},
|
||||
oldClaim: func() *resource.ResourceClaim {
|
||||
claim := validAllocatedClaim.DeepCopy()
|
||||
claim.Status.DeallocationRequested = true
|
||||
return claim
|
||||
}(),
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DeallocationRequested = false
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-deallocation-requested-in-use": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "deallocationRequested"), "deallocation cannot be requested while `reservedFor` is set")},
|
||||
oldClaim: func() *resource.ResourceClaim {
|
||||
claim := validAllocatedClaim.DeepCopy()
|
||||
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{
|
||||
{
|
||||
Resource: "pods",
|
||||
Name: "foo",
|
||||
UID: "1",
|
||||
},
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DeallocationRequested = true
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-deallocation-not-allocated": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status"), "`allocation` must be set when `deallocationRequested` is set")},
|
||||
oldClaim: validClaim,
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.DeallocationRequested = true
|
||||
return claim
|
||||
},
|
||||
},
|
||||
"invalid-allocation-removal-not-reset": {
|
||||
wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status"), "`allocation` must be set when `deallocationRequested` is set")},
|
||||
oldClaim: func() *resource.ResourceClaim {
|
||||
claim := validAllocatedClaim.DeepCopy()
|
||||
claim.Status.DeallocationRequested = true
|
||||
return claim
|
||||
}(),
|
||||
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
|
||||
claim.Status.Allocation = nil
|
||||
return claim
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldClaim.ResourceVersion = "1"
|
||||
errs := ValidateClaimStatusUpdate(scenario.update(scenario.oldClaim.DeepCopy()), scenario.oldClaim)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
@ -0,0 +1,313 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func testClaimTemplate(name, namespace string, spec resource.ResourceClaimSpec) *resource.ResourceClaimTemplate {
|
||||
return &resource.ResourceClaimTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: resource.ResourceClaimTemplateSpec{
|
||||
Spec: spec,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClaimTemplate(t *testing.T) {
|
||||
validMode := resource.AllocationModeImmediate
|
||||
invalidMode := resource.AllocationMode("invalid")
|
||||
goodName := "foo"
|
||||
badName := "!@#$%^"
|
||||
goodNS := "ns"
|
||||
goodClaimSpec := resource.ResourceClaimSpec{
|
||||
ResourceClassName: goodName,
|
||||
AllocationMode: validMode,
|
||||
}
|
||||
now := metav1.Now()
|
||||
badValue := "spaces not allowed"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
template *resource.ResourceClaimTemplate
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"good-claim": {
|
||||
template: testClaimTemplate(goodName, goodNS, goodClaimSpec),
|
||||
},
|
||||
"missing-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
|
||||
template: testClaimTemplate("", goodNS, goodClaimSpec),
|
||||
},
|
||||
"bad-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
template: testClaimTemplate(badName, goodNS, goodClaimSpec),
|
||||
},
|
||||
"missing-namespace": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
|
||||
template: testClaimTemplate(goodName, "", goodClaimSpec),
|
||||
},
|
||||
"generate-name": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.GenerateName = "pvc-"
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"uid": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"resource-version": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.ResourceVersion = "1"
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"generation": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Generation = 100
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"creation-timestamp": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.CreationTimestamp = now
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"deletion-grace-period-seconds": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.DeletionGracePeriodSeconds = pointer.Int64(10)
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"owner-references": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "pod",
|
||||
Name: "foo",
|
||||
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
|
||||
},
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"finalizers": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Finalizers = []string{
|
||||
"example.com/foo",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"managed-fields": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"good-labels": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"bad-labels": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Labels = map[string]string{
|
||||
"hello-world": badValue,
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"good-annotations": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Annotations = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"bad-annotations": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Annotations = map[string]string{
|
||||
badName: "hello world",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"bad-classname": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "spec", "resourceClassName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Spec.Spec.ResourceClassName = badName
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"bad-mode": {
|
||||
wantFailures: field.ErrorList{field.NotSupported(field.NewPath("spec", "spec", "allocationMode"), invalidMode, supportedAllocationModes.List())},
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Spec.Spec.AllocationMode = invalidMode
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"good-parameters": {
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Spec.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
|
||||
Kind: "foo",
|
||||
Name: "bar",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"missing-parameters-kind": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "spec", "parametersRef", "kind"), "")},
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Spec.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
|
||||
Name: "bar",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
"missing-parameters-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("spec", "spec", "parametersRef", "name"), "")},
|
||||
template: func() *resource.ResourceClaimTemplate {
|
||||
template := testClaimTemplate(goodName, goodNS, goodClaimSpec)
|
||||
template.Spec.Spec.ParametersRef = &resource.ResourceClaimParametersReference{
|
||||
Kind: "foo",
|
||||
}
|
||||
return template
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
errs := ValidateClaimTemplate(scenario.template)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClaimTemplateUpdate(t *testing.T) {
|
||||
name := "valid"
|
||||
parameters := &resource.ResourceClaimParametersReference{
|
||||
Kind: "foo",
|
||||
Name: "bar",
|
||||
}
|
||||
validClaimTemplate := testClaimTemplate("foo", "ns", resource.ResourceClaimSpec{
|
||||
ResourceClassName: name,
|
||||
AllocationMode: resource.AllocationModeImmediate,
|
||||
ParametersRef: parameters,
|
||||
})
|
||||
|
||||
scenarios := map[string]struct {
|
||||
oldClaimTemplate *resource.ResourceClaimTemplate
|
||||
update func(claim *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"valid-no-op-update": {
|
||||
oldClaimTemplate: validClaimTemplate,
|
||||
update: func(claim *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate { return claim },
|
||||
},
|
||||
"invalid-update-class": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimTemplateSpec {
|
||||
spec := validClaimTemplate.Spec.DeepCopy()
|
||||
spec.Spec.ResourceClassName += "2"
|
||||
return *spec
|
||||
}(), "field is immutable")},
|
||||
oldClaimTemplate: validClaimTemplate,
|
||||
update: func(template *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate {
|
||||
template.Spec.Spec.ResourceClassName += "2"
|
||||
return template
|
||||
},
|
||||
},
|
||||
"invalid-update-remove-parameters": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimTemplateSpec {
|
||||
spec := validClaimTemplate.Spec.DeepCopy()
|
||||
spec.Spec.ParametersRef = nil
|
||||
return *spec
|
||||
}(), "field is immutable")},
|
||||
oldClaimTemplate: validClaimTemplate,
|
||||
update: func(template *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate {
|
||||
template.Spec.Spec.ParametersRef = nil
|
||||
return template
|
||||
},
|
||||
},
|
||||
"invalid-update-mode": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec"), func() resource.ResourceClaimTemplateSpec {
|
||||
spec := validClaimTemplate.Spec.DeepCopy()
|
||||
spec.Spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
|
||||
return *spec
|
||||
}(), "field is immutable")},
|
||||
oldClaimTemplate: validClaimTemplate,
|
||||
update: func(template *resource.ResourceClaimTemplate) *resource.ResourceClaimTemplate {
|
||||
template.Spec.Spec.AllocationMode = resource.AllocationModeWaitForFirstConsumer
|
||||
return template
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
scenario.oldClaimTemplate.ResourceVersion = "1"
|
||||
errs := ValidateClaimTemplateUpdate(scenario.update(scenario.oldClaimTemplate.DeepCopy()), scenario.oldClaimTemplate)
|
||||
assert.Equal(t, scenario.wantFailures, errs)
|
||||
})
|
||||
}
|
||||
}
|
282
pkg/apis/resource/validation/validation_resourceclass_test.go
Normal file
282
pkg/apis/resource/validation/validation_resourceclass_test.go
Normal file
@ -0,0 +1,282 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package validation
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
func testClass(name, driverName string) *resource.ResourceClass {
|
||||
return &resource.ResourceClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
DriverName: driverName,
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidateClass(t *testing.T) {
|
||||
goodName := "foo"
|
||||
now := metav1.Now()
|
||||
goodParameters := resource.ResourceClassParametersReference{
|
||||
Name: "valid",
|
||||
Namespace: "valid",
|
||||
Kind: "foo",
|
||||
}
|
||||
badName := "!@#$%^"
|
||||
badValue := "spaces not allowed"
|
||||
|
||||
scenarios := map[string]struct {
|
||||
class *resource.ResourceClass
|
||||
wantFailures field.ErrorList
|
||||
}{
|
||||
"good-class": {
|
||||
class: testClass(goodName, goodName),
|
||||
},
|
||||
"good-long-driver-name": {
|
||||
class: testClass(goodName, "acme.example.com"),
|
||||
},
|
||||
"missing-name": {
|
||||
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
|
||||
class: testClass("", goodName),
|
||||
},
|
||||
"bad-name": {
|
||||
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
|
||||
class: testClass(badName, goodName),
|
||||
},
|
||||
"generate-name": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.GenerateName = "pvc-"
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"uid": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"resource-version": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.ResourceVersion = "1"
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"generation": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.Generation = 100
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"creation-timestamp": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.CreationTimestamp = now
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"deletion-grace-period-seconds": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.DeletionGracePeriodSeconds = pointer.Int64(10)
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"owner-references": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "pod",
|
||||
Name: "foo",
|
||||
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
|
||||
},
|
||||
}
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"finalizers": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.Finalizers = []string{
|
||||
"example.com/foo",
|
||||
}
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"managed-fields": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
}
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"good-labels": {
|
||||
class: func() *resource.ResourceClass {
|
||||
class := testClass(goodName, goodName)
|
||||
class.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return class
|
||||
}(),
|
||||
},
|
||||
"bad-labels": {
|
||||
			wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.Labels = map[string]string{
					"hello-world": badValue,
				}
				return class
			}(),
		},
		"good-annotations": {
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.Annotations = map[string]string{
					"foo": "bar",
				}
				return class
			}(),
		},
		"bad-annotations": {
			wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.Annotations = map[string]string{
					badName: "hello world",
				}
				return class
			}(),
		},
		"missing-driver-name": {
			wantFailures: field.ErrorList{field.Required(field.NewPath("driverName"), ""),
				field.Invalid(field.NewPath("driverName"), "", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
			},
			class: testClass(goodName, ""),
		},
		"invalid-driver-name": {
			wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
			class: testClass(goodName, badName),
		},
		"invalid-qualified-driver-name": {
			wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), goodName+"/path", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
			class: testClass(goodName, goodName+"/path"),
		},
		"good-parameters": {
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.ParametersRef = goodParameters.DeepCopy()
				return class
			}(),
		},
		"missing-parameters-name": {
			wantFailures: field.ErrorList{field.Required(field.NewPath("parametersRef", "name"), "")},
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.ParametersRef = goodParameters.DeepCopy()
				class.ParametersRef.Name = ""
				return class
			}(),
		},
		"bad-parameters-namespace": {
			wantFailures: field.ErrorList{field.Invalid(field.NewPath("parametersRef", "namespace"), badName, "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')")},
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.ParametersRef = goodParameters.DeepCopy()
				class.ParametersRef.Namespace = badName
				return class
			}(),
		},
		"missing-parameters-kind": {
			wantFailures: field.ErrorList{field.Required(field.NewPath("parametersRef", "kind"), "")},
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.ParametersRef = goodParameters.DeepCopy()
				class.ParametersRef.Kind = ""
				return class
			}(),
		},
		"invalid-node-selector": {
			wantFailures: field.ErrorList{field.Required(field.NewPath("suitableNodes", "nodeSelectorTerms"), "must have at least one node selector term")},
			class: func() *resource.ResourceClass {
				class := testClass(goodName, goodName)
				class.SuitableNodes = &core.NodeSelector{
					// Must not be empty.
				}
				return class
			}(),
		},
	}

	for name, scenario := range scenarios {
		t.Run(name, func(t *testing.T) {
			errs := ValidateClass(scenario.class)
			assert.Equal(t, scenario.wantFailures, errs)
		})
	}
}

func TestValidateClassUpdate(t *testing.T) {
	validClass := testClass("foo", "valid")

	scenarios := map[string]struct {
		oldClass     *resource.ResourceClass
		update       func(class *resource.ResourceClass) *resource.ResourceClass
		wantFailures field.ErrorList
	}{
		"valid-no-op-update": {
			oldClass: validClass,
			update:   func(class *resource.ResourceClass) *resource.ResourceClass { return class },
		},
		"update-driver": {
			oldClass: validClass,
			update: func(class *resource.ResourceClass) *resource.ResourceClass {
				class.DriverName += "2"
				return class
			},
		},
	}

	for name, scenario := range scenarios {
		t.Run(name, func(t *testing.T) {
			scenario.oldClass.ResourceVersion = "1"
			errs := ValidateClassUpdate(scenario.update(scenario.oldClass.DeepCopy()), scenario.oldClass)
			assert.Equal(t, scenario.wantFailures, errs)
		})
	}
}
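The scenarios above lean on a small testClass helper that is defined earlier in this test file and is not part of this hunk. A minimal sketch of what such a helper plausibly looks like, assuming it only fills in the object name and driver name and relies on the test file's existing metav1 and resource imports (the real helper may set more fields):

func testClass(name, driverName string) *resource.ResourceClass {
	return &resource.ResourceClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		DriverName: driverName,
	}
}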
@ -38,6 +38,7 @@ import (
	_ "k8s.io/kubernetes/pkg/apis/node/install"
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
	_ "k8s.io/kubernetes/pkg/apis/rbac/install"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
	_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
	_ "k8s.io/kubernetes/pkg/apis/storage/install"
)

@ -54,6 +54,7 @@ import (
	policyapiv1 "k8s.io/api/policy/v1"
	policyapiv1beta1 "k8s.io/api/policy/v1beta1"
	rbacv1 "k8s.io/api/rbac/v1"
	resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
	schedulingapiv1 "k8s.io/api/scheduling/v1"
	storageapiv1 "k8s.io/api/storage/v1"
	storageapiv1alpha1 "k8s.io/api/storage/v1alpha1"

@ -108,6 +109,7 @@ import (
	noderest "k8s.io/kubernetes/pkg/registry/node/rest"
	policyrest "k8s.io/kubernetes/pkg/registry/policy/rest"
	rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
	resourcerest "k8s.io/kubernetes/pkg/registry/resource/rest"
	schedulingrest "k8s.io/kubernetes/pkg/registry/scheduling/rest"
	storagerest "k8s.io/kubernetes/pkg/registry/storage/rest"
)

@ -435,6 +437,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
		appsrest.StorageProvider{},
		admissionregistrationrest.RESTStorageProvider{Authorizer: c.GenericConfig.Authorization.Authorizer, DiscoveryClient: discoveryClientForAdmissionRegistration},
		eventsrest.RESTStorageProvider{TTL: c.ExtraConfig.EventTTL},
		resourcerest.RESTStorageProvider{},
	}
	if err := m.InstallAPIs(c.ExtraConfig.APIResourceConfigSource, c.GenericConfig.RESTOptionsGetter, restStorageProviders...); err != nil {
		return nil, err

@ -704,6 +707,7 @@ var (
		admissionregistrationv1alpha1.SchemeGroupVersion,
		apiserverinternalv1alpha1.SchemeGroupVersion,
		authenticationv1alpha1.SchemeGroupVersion,
		resourcev1alpha1.SchemeGroupVersion,
		networkingapiv1alpha1.SchemeGroupVersion,
		storageapiv1alpha1.SchemeGroupVersion,
		flowcontrolv1alpha1.SchemeGroupVersion,

@ -78,6 +78,9 @@ rules:
  - k8s.io/kubernetes/pkg/apis/rbac/v1
  - k8s.io/kubernetes/pkg/apis/rbac/v1alpha1
  - k8s.io/kubernetes/pkg/apis/rbac/v1beta1
  - k8s.io/kubernetes/pkg/apis/resource
  - k8s.io/kubernetes/pkg/apis/resource/install
  - k8s.io/kubernetes/pkg/apis/resource/v1alpha1
  - k8s.io/kubernetes/pkg/apis/scheduling
  - k8s.io/kubernetes/pkg/apis/scheduling/install
  - k8s.io/kubernetes/pkg/apis/scheduling/v1alpha1

@ -38,6 +38,7 @@ import (
	_ "k8s.io/kubernetes/pkg/apis/node/install"
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
	_ "k8s.io/kubernetes/pkg/apis/rbac/install"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
	_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
	_ "k8s.io/kubernetes/pkg/apis/storage/install"

@ -32,6 +32,7 @@ import (
	_ "k8s.io/kubernetes/pkg/apis/extensions/install"
	_ "k8s.io/kubernetes/pkg/apis/policy/install"
	_ "k8s.io/kubernetes/pkg/apis/rbac/install"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
	_ "k8s.io/kubernetes/pkg/apis/scheduling/install"
	_ "k8s.io/kubernetes/pkg/apis/storage/install"
)
6
pkg/registry/resource/OWNERS
Normal file
@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners

reviewers:
- bart0sh
- klueska
- pohly
100
pkg/registry/resource/podscheduling/storage/storage.go
Normal file
@ -0,0 +1,100 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/printers"
	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
	printerstorage "k8s.io/kubernetes/pkg/printers/storage"
	"k8s.io/kubernetes/pkg/registry/resource/podscheduling"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// REST implements a RESTStorage for PodSchedulings.
type REST struct {
	*genericregistry.Store
}

// NewREST returns a RESTStorage object that will work against PodSchedulings.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) {
	store := &genericregistry.Store{
		NewFunc:                  func() runtime.Object { return &resource.PodScheduling{} },
		NewListFunc:              func() runtime.Object { return &resource.PodSchedulingList{} },
		PredicateFunc:            podscheduling.Match,
		DefaultQualifiedResource: resource.Resource("podschedulings"),

		CreateStrategy:      podscheduling.Strategy,
		UpdateStrategy:      podscheduling.Strategy,
		DeleteStrategy:      podscheduling.Strategy,
		ReturnDeletedObject: true,
		ResetFieldsStrategy: podscheduling.Strategy,

		TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
	}
	options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podscheduling.GetAttrs}
	if err := store.CompleteWithOptions(options); err != nil {
		return nil, nil, err
	}

	statusStore := *store
	statusStore.UpdateStrategy = podscheduling.StatusStrategy
	statusStore.ResetFieldsStrategy = podscheduling.StatusStrategy

	rest := &REST{store}

	return rest, &StatusREST{store: &statusStore}, nil
}

// StatusREST implements the REST endpoint for changing the status of a PodScheduling.
type StatusREST struct {
	store *genericregistry.Store
}

// New creates a new PodScheduling object.
func (r *StatusREST) New() runtime.Object {
	return &resource.PodScheduling{}
}

func (r *StatusREST) Destroy() {
	// Given that underlying store is shared with REST,
	// we don't destroy it here explicitly.
}

// Get retrieves the object from the storage. It is required to support Patch.
func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	return r.store.Get(ctx, name, options)
}

// Update alters the status subset of an object.
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
	// We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
	// subresources should never allow create on update.
	return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
}

// GetResetFields implements rest.ResetFieldsStrategy
func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	return r.store.GetResetFields()
}
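The storage above only becomes reachable once it is wired into the resource.k8s.io/v1alpha1 API group. That wiring lives in pkg/registry/resource/rest (referenced as resourcerest.RESTStorageProvider{} in the apiserver hunk earlier) and is not part of this hunk; the following is a minimal sketch of what such wiring typically looks like, with the function name and exact shape chosen only for illustration:

package rest // hypothetical location, for illustration only

import (
	"k8s.io/apiserver/pkg/registry/generic"
	"k8s.io/apiserver/pkg/registry/rest"
	genericapiserver "k8s.io/apiserver/pkg/server"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/resource"
	podschedulingstore "k8s.io/kubernetes/pkg/registry/resource/podscheduling/storage"
)

// newV1alpha1Storage sketches how the PodScheduling storage and its status
// subresource could be registered under resource.k8s.io/v1alpha1; the real
// provider also registers the other resource.k8s.io kinds.
func newV1alpha1Storage(optsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, error) {
	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(resource.GroupName, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs)

	// NewREST returns the main storage plus the status subresource storage.
	podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingstore.NewREST(optsGetter)
	if err != nil {
		return genericapiserver.APIGroupInfo{}, err
	}

	apiGroupInfo.VersionedResourcesStorageMap["v1alpha1"] = map[string]rest.Storage{
		"podschedulings":        podSchedulingStorage,
		"podschedulings/status": podSchedulingStatusStorage,
	}
	return apiGroupInfo, nil
}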
184
pkg/registry/resource/podscheduling/storage/storage_test.go
Normal file
@ -0,0 +1,184 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"testing"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/diff"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
	"k8s.io/apiserver/pkg/registry/rest"
	etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
	"k8s.io/kubernetes/pkg/apis/resource"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
	"k8s.io/kubernetes/pkg/registry/registrytest"
)

func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer) {
	etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
	restOptions := generic.RESTOptions{
		StorageConfig:           etcdStorage,
		Decorator:               generic.UndecoratedStorage,
		DeleteCollectionWorkers: 1,
		ResourcePrefix:          "podschedulings",
	}
	podSchedulingStorage, statusStorage, err := NewREST(restOptions)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	return podSchedulingStorage, statusStorage, server
}

func validNewPodScheduling(name, ns string) *resource.PodScheduling {
	scheduling := &resource.PodScheduling{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: resource.PodSchedulingSpec{
			SelectedNode: "worker",
		},
		Status: resource.PodSchedulingStatus{},
	}
	return scheduling
}

func TestCreate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	scheduling := validNewPodScheduling("foo", metav1.NamespaceDefault)
	scheduling.ObjectMeta = metav1.ObjectMeta{}
	test.TestCreate(
		// valid
		scheduling,
		// invalid
		&resource.PodScheduling{
			ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
		},
	)
}

func TestUpdate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestUpdate(
		// valid
		validNewPodScheduling("foo", metav1.NamespaceDefault),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*resource.PodScheduling)
			if object.Labels == nil {
				object.Labels = map[string]string{}
			}
			object.Labels["foo"] = "bar"
			return object
		},
	)
}

func TestDelete(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
	test.TestDelete(validNewPodScheduling("foo", metav1.NamespaceDefault))
}

func TestGet(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestGet(validNewPodScheduling("foo", metav1.NamespaceDefault))
}

func TestList(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestList(validNewPodScheduling("foo", metav1.NamespaceDefault))
}

func TestWatch(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestWatch(
		validNewPodScheduling("foo", metav1.NamespaceDefault),
		// matching labels
		[]labels.Set{},
		// not matching labels
		[]labels.Set{
			{"foo": "bar"},
		},
		// matching fields
		[]fields.Set{
			{"metadata.name": "foo"},
		},
		// not matching fields
		[]fields.Set{
			{"metadata.name": "bar"},
		},
	)
}

func TestUpdateStatus(t *testing.T) {
	storage, statusStorage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	ctx := genericapirequest.NewDefaultContext()

	key, _ := storage.KeyFunc(ctx, "foo")
	schedulingStart := validNewPodScheduling("foo", metav1.NamespaceDefault)
	err := storage.Storage.Create(ctx, key, schedulingStart, nil, 0, false)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	scheduling := schedulingStart.DeepCopy()
	scheduling.Status.ResourceClaims = append(scheduling.Status.ResourceClaims,
		resource.ResourceClaimSchedulingStatus{
			Name: "my-claim",
		},
	)
	_, _, err = statusStorage.Update(ctx, scheduling.Name, rest.DefaultUpdatedObjectInfo(scheduling), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	schedulingOut := obj.(*resource.PodScheduling)
	// only compare relevant changes b/c of difference in metadata
	if !apiequality.Semantic.DeepEqual(scheduling.Status, schedulingOut.Status) {
		t.Errorf("unexpected object: %s", diff.ObjectDiff(scheduling.Status, schedulingOut.Status))
	}
}
163
pkg/registry/resource/podscheduling/strategy.go
Normal file
@ -0,0 +1,163 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podscheduling

import (
	"context"
	"errors"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/registry/generic"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/apis/resource/validation"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// podSchedulingStrategy implements behavior for PodScheduling objects
type podSchedulingStrategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}

// Strategy is the default logic that applies when creating and updating
// PodScheduling objects via the REST API.
var Strategy = podSchedulingStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}

func (podSchedulingStrategy) NamespaceScoped() bool {
	return true
}

// GetResetFields returns the set of fields that get reset by the strategy and
// should not be modified by the user. For a new PodScheduling that is the
// status.
func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	fields := map[fieldpath.APIVersion]*fieldpath.Set{
		"resource.k8s.io/v1alpha1": fieldpath.NewSet(
			fieldpath.MakePathOrDie("status"),
		),
	}

	return fields
}

func (podSchedulingStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
	scheduling := obj.(*resource.PodScheduling)
	// Status must not be set by user on create.
	scheduling.Status = resource.PodSchedulingStatus{}
}

func (podSchedulingStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
	scheduling := obj.(*resource.PodScheduling)
	return validation.ValidatePodScheduling(scheduling)
}

func (podSchedulingStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
	return nil
}

func (podSchedulingStrategy) Canonicalize(obj runtime.Object) {
}

func (podSchedulingStrategy) AllowCreateOnUpdate() bool {
	return false
}

func (podSchedulingStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newScheduling := obj.(*resource.PodScheduling)
	oldScheduling := old.(*resource.PodScheduling)
	newScheduling.Status = oldScheduling.Status
}

func (podSchedulingStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	newScheduling := obj.(*resource.PodScheduling)
	oldScheduling := old.(*resource.PodScheduling)
	errorList := validation.ValidatePodScheduling(newScheduling)
	return append(errorList, validation.ValidatePodSchedulingUpdate(newScheduling, oldScheduling)...)
}

func (podSchedulingStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

func (podSchedulingStrategy) AllowUnconditionalUpdate() bool {
	return true
}

type podSchedulingStatusStrategy struct {
	podSchedulingStrategy
}

var StatusStrategy = podSchedulingStatusStrategy{Strategy}

// GetResetFields returns the set of fields that get reset by the strategy and
// should not be modified by the user. For a status update that is the spec.
func (podSchedulingStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	fields := map[fieldpath.APIVersion]*fieldpath.Set{
		"resource.k8s.io/v1alpha1": fieldpath.NewSet(
			fieldpath.MakePathOrDie("spec"),
		),
	}

	return fields
}

func (podSchedulingStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newScheduling := obj.(*resource.PodScheduling)
	oldScheduling := old.(*resource.PodScheduling)
	newScheduling.Spec = oldScheduling.Spec
}

func (podSchedulingStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	newScheduling := obj.(*resource.PodScheduling)
	oldScheduling := old.(*resource.PodScheduling)
	return validation.ValidatePodSchedulingStatusUpdate(newScheduling, oldScheduling)
}

// WarningsOnUpdate returns warnings for the given update.
func (podSchedulingStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

// Match returns a generic matcher for a given label and field selector.
func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
	return storage.SelectionPredicate{
		Label:    label,
		Field:    field,
		GetAttrs: GetAttrs,
	}
}

// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
	scheduling, ok := obj.(*resource.PodScheduling)
	if !ok {
		return nil, nil, errors.New("not a PodScheduling")
	}
	return labels.Set(scheduling.Labels), toSelectableFields(scheduling), nil
}

// toSelectableFields returns a field set that represents the object
func toSelectableFields(scheduling *resource.PodScheduling) fields.Set {
	fields := generic.ObjectMetaFieldsSet(&scheduling.ObjectMeta, true)
	return fields
}
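Match and GetAttrs are what let the generic registry evaluate label and field selectors against stored PodScheduling objects. A small illustrative example (not part of this commit) of how the resulting predicate behaves, assuming the packages shown in the imports:

package podscheduling_test // hypothetical example, for illustration only

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/registry/resource/podscheduling"
)

// ExampleMatch shows a field-selector predicate matching on metadata.name,
// which GetAttrs exposes via ObjectMetaFieldsSet.
func ExampleMatch() {
	pred := podscheduling.Match(labels.Everything(), fields.OneTermEqualSelector("metadata.name", "pod-a"))

	obj := &resource.PodScheduling{
		ObjectMeta: metav1.ObjectMeta{Name: "pod-a", Namespace: "default"},
	}
	ok, err := pred.Matches(obj)
	fmt.Println(ok, err)
	// Output: true <nil>
}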
84
pkg/registry/resource/podscheduling/strategy_test.go
Normal file
@ -0,0 +1,84 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package podscheduling

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/kubernetes/pkg/apis/resource"
)

var podScheduling = &resource.PodScheduling{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "valid-pod",
		Namespace: "default",
	},
	Spec: resource.PodSchedulingSpec{
		SelectedNode: "worker",
	},
}

func TestPodSchedulingStrategy(t *testing.T) {
	if !Strategy.NamespaceScoped() {
		t.Errorf("PodScheduling must be namespace scoped")
	}
	if Strategy.AllowCreateOnUpdate() {
		t.Errorf("PodScheduling should not allow create on update")
	}
}

func TestPodSchedulingStrategyCreate(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	podScheduling := podScheduling.DeepCopy()

	Strategy.PrepareForCreate(ctx, podScheduling)
	errs := Strategy.Validate(ctx, podScheduling)
	if len(errs) != 0 {
		t.Errorf("unexpected error validating for create %v", errs)
	}
}

func TestPodSchedulingStrategyUpdate(t *testing.T) {
	t.Run("no-changes-okay", func(t *testing.T) {
		ctx := genericapirequest.NewDefaultContext()
		podScheduling := podScheduling.DeepCopy()
		newPodScheduling := podScheduling.DeepCopy()
		newPodScheduling.ResourceVersion = "4"

		Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling)
		errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling)
		if len(errs) != 0 {
			t.Errorf("unexpected validation errors: %v", errs)
		}
	})

	t.Run("name-change-not-allowed", func(t *testing.T) {
		ctx := genericapirequest.NewDefaultContext()
		podScheduling := podScheduling.DeepCopy()
		newPodScheduling := podScheduling.DeepCopy()
		newPodScheduling.Name = "valid-claim-2"
		newPodScheduling.ResourceVersion = "4"

		Strategy.PrepareForUpdate(ctx, newPodScheduling, podScheduling)
		errs := Strategy.ValidateUpdate(ctx, newPodScheduling, podScheduling)
		if len(errs) == 0 {
			t.Errorf("expected a validation error")
		}
	})
}
100
pkg/registry/resource/resourceclaim/storage/storage.go
Normal file
@ -0,0 +1,100 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	"k8s.io/apiserver/pkg/registry/rest"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/printers"
	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
	printerstorage "k8s.io/kubernetes/pkg/printers/storage"
	"k8s.io/kubernetes/pkg/registry/resource/resourceclaim"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// REST implements a RESTStorage for ResourceClaims.
type REST struct {
	*genericregistry.Store
}

// NewREST returns a RESTStorage object that will work against ResourceClaims.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) {
	store := &genericregistry.Store{
		NewFunc:                  func() runtime.Object { return &resource.ResourceClaim{} },
		NewListFunc:              func() runtime.Object { return &resource.ResourceClaimList{} },
		PredicateFunc:            resourceclaim.Match,
		DefaultQualifiedResource: resource.Resource("resourceclaims"),

		CreateStrategy:      resourceclaim.Strategy,
		UpdateStrategy:      resourceclaim.Strategy,
		DeleteStrategy:      resourceclaim.Strategy,
		ReturnDeletedObject: true,
		ResetFieldsStrategy: resourceclaim.Strategy,

		TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
	}
	options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: resourceclaim.GetAttrs}
	if err := store.CompleteWithOptions(options); err != nil {
		return nil, nil, err
	}

	statusStore := *store
	statusStore.UpdateStrategy = resourceclaim.StatusStrategy
	statusStore.ResetFieldsStrategy = resourceclaim.StatusStrategy

	rest := &REST{store}

	return rest, &StatusREST{store: &statusStore}, nil
}

// StatusREST implements the REST endpoint for changing the status of a ResourceClaim.
type StatusREST struct {
	store *genericregistry.Store
}

// New creates a new ResourceClaim object.
func (r *StatusREST) New() runtime.Object {
	return &resource.ResourceClaim{}
}

func (r *StatusREST) Destroy() {
	// Given that underlying store is shared with REST,
	// we don't destroy it here explicitly.
}

// Get retrieves the object from the storage. It is required to support Patch.
func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	return r.store.Get(ctx, name, options)
}

// Update alters the status subset of an object.
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
	// We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
	// subresources should never allow create on update.
	return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
}

// GetResetFields implements rest.ResetFieldsStrategy
func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	return r.store.GetResetFields()
}
182
pkg/registry/resource/resourceclaim/storage/storage_test.go
Normal file
@ -0,0 +1,182 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"testing"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/diff"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
	"k8s.io/apiserver/pkg/registry/rest"
	etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
	"k8s.io/kubernetes/pkg/apis/resource"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
	"k8s.io/kubernetes/pkg/registry/registrytest"
)

func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer) {
	etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
	restOptions := generic.RESTOptions{
		StorageConfig:           etcdStorage,
		Decorator:               generic.UndecoratedStorage,
		DeleteCollectionWorkers: 1,
		ResourcePrefix:          "resourceclaims",
	}
	resourceClaimStorage, statusStorage, err := NewREST(restOptions)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	return resourceClaimStorage, statusStorage, server
}

func validNewClaim(name, ns string) *resource.ResourceClaim {
	claim := &resource.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: resource.ResourceClaimSpec{
			ResourceClassName: "example",
			AllocationMode:    resource.AllocationModeImmediate,
		},
		Status: resource.ResourceClaimStatus{},
	}
	return claim
}

func TestCreate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	claim := validNewClaim("foo", metav1.NamespaceDefault)
	claim.ObjectMeta = metav1.ObjectMeta{}
	test.TestCreate(
		// valid
		claim,
		// invalid
		&resource.ResourceClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
		},
	)
}

func TestUpdate(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestUpdate(
		// valid
		validNewClaim("foo", metav1.NamespaceDefault),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*resource.ResourceClaim)
			if object.Labels == nil {
				object.Labels = map[string]string{}
			}
			object.Labels["foo"] = "bar"
			return object
		},
	)
}

func TestDelete(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
	test.TestDelete(validNewClaim("foo", metav1.NamespaceDefault))
}

func TestGet(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestGet(validNewClaim("foo", metav1.NamespaceDefault))
}

func TestList(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestList(validNewClaim("foo", metav1.NamespaceDefault))
}

func TestWatch(t *testing.T) {
	storage, _, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestWatch(
		validNewClaim("foo", metav1.NamespaceDefault),
		// matching labels
		[]labels.Set{},
		// not matching labels
		[]labels.Set{
			{"foo": "bar"},
		},
		// matching fields
		[]fields.Set{
			{"metadata.name": "foo"},
		},
		// not matching fields
		[]fields.Set{
			{"metadata.name": "bar"},
		},
	)
}

func TestUpdateStatus(t *testing.T) {
	storage, statusStorage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	ctx := genericapirequest.NewDefaultContext()

	key, _ := storage.KeyFunc(ctx, "foo")
	claimStart := validNewClaim("foo", metav1.NamespaceDefault)
	err := storage.Storage.Create(ctx, key, claimStart, nil, 0, false)
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}

	claim := claimStart.DeepCopy()
	claim.Status.DriverName = "some-driver.example.com"
	claim.Status.Allocation = &resource.AllocationResult{}
	_, _, err = statusStorage.Update(ctx, claim.Name, rest.DefaultUpdatedObjectInfo(claim), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if err != nil {
		t.Fatalf("Unexpected error: %v", err)
	}
	obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	claimOut := obj.(*resource.ResourceClaim)
	// only compare relevant changes b/c of difference in metadata
	if !apiequality.Semantic.DeepEqual(claim.Status, claimOut.Status) {
		t.Errorf("unexpected object: %s", diff.ObjectDiff(claim.Status, claimOut.Status))
	}
}
163
pkg/registry/resource/resourceclaim/strategy.go
Normal file
@ -0,0 +1,163 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourceclaim

import (
	"context"
	"errors"

	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/registry/generic"
	"k8s.io/apiserver/pkg/storage"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/apis/resource/validation"
	"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
)

// resourceclaimStrategy implements behavior for ResourceClaim objects
type resourceclaimStrategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}

// Strategy is the default logic that applies when creating and updating
// ResourceClaim objects via the REST API.
var Strategy = resourceclaimStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}

func (resourceclaimStrategy) NamespaceScoped() bool {
	return true
}

// GetResetFields returns the set of fields that get reset by the strategy and
// should not be modified by the user. For a new ResourceClaim that is the
// status.
func (resourceclaimStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	fields := map[fieldpath.APIVersion]*fieldpath.Set{
		"resource.k8s.io/v1alpha1": fieldpath.NewSet(
			fieldpath.MakePathOrDie("status"),
		),
	}

	return fields
}

func (resourceclaimStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
	claim := obj.(*resource.ResourceClaim)
	// Status must not be set by user on create.
	claim.Status = resource.ResourceClaimStatus{}
}

func (resourceclaimStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
	claim := obj.(*resource.ResourceClaim)
	return validation.ValidateClaim(claim)
}

func (resourceclaimStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
	return nil
}

func (resourceclaimStrategy) Canonicalize(obj runtime.Object) {
}

func (resourceclaimStrategy) AllowCreateOnUpdate() bool {
	return false
}

func (resourceclaimStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newClaim := obj.(*resource.ResourceClaim)
	oldClaim := old.(*resource.ResourceClaim)
	newClaim.Status = oldClaim.Status
}

func (resourceclaimStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	newClaim := obj.(*resource.ResourceClaim)
	oldClaim := old.(*resource.ResourceClaim)
	errorList := validation.ValidateClaim(newClaim)
	return append(errorList, validation.ValidateClaimUpdate(newClaim, oldClaim)...)
}

func (resourceclaimStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

func (resourceclaimStrategy) AllowUnconditionalUpdate() bool {
	return true
}

type resourceclaimStatusStrategy struct {
	resourceclaimStrategy
}

var StatusStrategy = resourceclaimStatusStrategy{Strategy}

// GetResetFields returns the set of fields that get reset by the strategy and
// should not be modified by the user. For a status update that is the spec.
func (resourceclaimStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	fields := map[fieldpath.APIVersion]*fieldpath.Set{
		"resource.k8s.io/v1alpha1": fieldpath.NewSet(
			fieldpath.MakePathOrDie("spec"),
		),
	}

	return fields
}

func (resourceclaimStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newClaim := obj.(*resource.ResourceClaim)
	oldClaim := old.(*resource.ResourceClaim)
	newClaim.Spec = oldClaim.Spec
}

func (resourceclaimStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	newClaim := obj.(*resource.ResourceClaim)
	oldClaim := old.(*resource.ResourceClaim)
	return validation.ValidateClaimStatusUpdate(newClaim, oldClaim)
}

// WarningsOnUpdate returns warnings for the given update.
func (resourceclaimStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

// Match returns a generic matcher for a given label and field selector.
func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
	return storage.SelectionPredicate{
		Label:    label,
		Field:    field,
		GetAttrs: GetAttrs,
	}
}

// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
	claim, ok := obj.(*resource.ResourceClaim)
	if !ok {
		return nil, nil, errors.New("not a resourceclaim")
	}
	return labels.Set(claim.Labels), toSelectableFields(claim), nil
}

// toSelectableFields returns a field set that represents the object
func toSelectableFields(claim *resource.ResourceClaim) fields.Set {
	fields := generic.ObjectMetaFieldsSet(&claim.ObjectMeta, true)
	return fields
}
85
pkg/registry/resource/resourceclaim/strategy_test.go
Normal file
@ -0,0 +1,85 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourceclaim

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	"k8s.io/kubernetes/pkg/apis/resource"
)

var resourceClaim = &resource.ResourceClaim{
	ObjectMeta: metav1.ObjectMeta{
		Name:      "valid-claim",
		Namespace: "default",
	},
	Spec: resource.ResourceClaimSpec{
		ResourceClassName: "valid-class",
		AllocationMode:    resource.AllocationModeImmediate,
	},
}

func TestClaimStrategy(t *testing.T) {
	if !Strategy.NamespaceScoped() {
		t.Errorf("ResourceClaim must be namespace scoped")
	}
	if Strategy.AllowCreateOnUpdate() {
		t.Errorf("ResourceClaim should not allow create on update")
	}
}

func TestClaimStrategyCreate(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	resourceClaim := resourceClaim.DeepCopy()

	Strategy.PrepareForCreate(ctx, resourceClaim)
	errs := Strategy.Validate(ctx, resourceClaim)
	if len(errs) != 0 {
		t.Errorf("unexpected error validating for create %v", errs)
	}
}

func TestClaimStrategyUpdate(t *testing.T) {
	t.Run("no-changes-okay", func(t *testing.T) {
		ctx := genericapirequest.NewDefaultContext()
		resourceClaim := resourceClaim.DeepCopy()
		newClaim := resourceClaim.DeepCopy()
		newClaim.ResourceVersion = "4"

		Strategy.PrepareForUpdate(ctx, newClaim, resourceClaim)
		errs := Strategy.ValidateUpdate(ctx, newClaim, resourceClaim)
		if len(errs) != 0 {
			t.Errorf("unexpected validation errors: %v", errs)
		}
	})

	t.Run("name-change-not-allowed", func(t *testing.T) {
		ctx := genericapirequest.NewDefaultContext()
		resourceClaim := resourceClaim.DeepCopy()
		newClaim := resourceClaim.DeepCopy()
		newClaim.Name = "valid-claim-2"
		newClaim.ResourceVersion = "4"

		Strategy.PrepareForUpdate(ctx, newClaim, resourceClaim)
		errs := Strategy.ValidateUpdate(ctx, newClaim, resourceClaim)
		if len(errs) == 0 {
			t.Errorf("expected a validation error")
		}
	})
}
@ -0,0 +1,55 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	"k8s.io/kubernetes/pkg/apis/resource"
	"k8s.io/kubernetes/pkg/printers"
	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
	printerstorage "k8s.io/kubernetes/pkg/printers/storage"
	"k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate"
)

// REST implements a RESTStorage for ResourceClaimTemplate.
type REST struct {
	*genericregistry.Store
}

// NewREST returns a RESTStorage object that will work against ResourceClaimTemplates.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
	store := &genericregistry.Store{
		NewFunc:                  func() runtime.Object { return &resource.ResourceClaimTemplate{} },
		NewListFunc:              func() runtime.Object { return &resource.ResourceClaimTemplateList{} },
		DefaultQualifiedResource: resource.Resource("resourceclaimtemplates"),

		CreateStrategy:      resourceclaimtemplate.Strategy,
		UpdateStrategy:      resourceclaimtemplate.Strategy,
		DeleteStrategy:      resourceclaimtemplate.Strategy,
		ReturnDeletedObject: true,

		TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
	}
	options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: resourceclaimtemplate.GetAttrs}
	if err := store.CompleteWithOptions(options); err != nil {
		return nil, err
	}

	return &REST{store}, nil
}
@ -0,0 +1,151 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
	etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
	"k8s.io/kubernetes/pkg/apis/resource"
	_ "k8s.io/kubernetes/pkg/apis/resource/install"
	"k8s.io/kubernetes/pkg/registry/registrytest"
)

func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
	etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
	restOptions := generic.RESTOptions{
		StorageConfig:           etcdStorage,
		Decorator:               generic.UndecoratedStorage,
		DeleteCollectionWorkers: 1,
		ResourcePrefix:          "resourceclaimtemplates",
	}
	resourceClaimTemplateStorage, err := NewREST(restOptions)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	return resourceClaimTemplateStorage, server
}

func validNewClaimTemplate(name string) *resource.ResourceClaimTemplate {
	return &resource.ResourceClaimTemplate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: metav1.NamespaceDefault,
		},
		Spec: resource.ResourceClaimTemplateSpec{
			Spec: resource.ResourceClaimSpec{
				ResourceClassName: "valid-class",
				AllocationMode:    resource.AllocationModeImmediate,
			},
		},
	}
}

func TestCreate(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	resourceClaimTemplate := validNewClaimTemplate("foo")
	resourceClaimTemplate.ObjectMeta = metav1.ObjectMeta{GenerateName: "foo"}
	test.TestCreate(
		// valid
		resourceClaimTemplate,
		// invalid
		&resource.ResourceClaimTemplate{
			ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
		},
	)
}

func TestUpdate(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestUpdate(
		// valid
		validNewClaimTemplate("foo"),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*resource.ResourceClaimTemplate)
			object.Labels = map[string]string{"a": "b"}
			return object
		},
		// invalid update
		func(obj runtime.Object) runtime.Object {
			object := obj.(*resource.ResourceClaimTemplate)
			object.Spec.Spec.ResourceClassName = ""
			return object
		},
	)

}

func TestDelete(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
	test.TestDelete(validNewClaimTemplate("foo"))
}

func TestGet(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestGet(validNewClaimTemplate("foo"))
}

func TestList(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestList(validNewClaimTemplate("foo"))
}

func TestWatch(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test.TestWatch(
		validNewClaimTemplate("foo"),
		// matching labels
		[]labels.Set{},
		// not matching labels
		[]labels.Set{
			{"foo": "bar"},
		},
		// matching fields
		[]fields.Set{
			{"metadata.name": "foo"},
		},
		// not matching fields
		[]fields.Set{
			{"metadata.name": "bar"},
		},
	)
}
94
pkg/registry/resource/resourceclaimtemplate/strategy.go
Normal file
94
pkg/registry/resource/resourceclaimtemplate/strategy.go
Normal file
@ -0,0 +1,94 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourceclaimtemplate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/kubernetes/pkg/apis/resource/validation"
|
||||
)
|
||||
|
||||
// resourceClaimTemplateStrategy implements behavior for ResourceClaimTemplate objects
|
||||
type resourceClaimTemplateStrategy struct {
|
||||
runtime.ObjectTyper
|
||||
names.NameGenerator
|
||||
}
|
||||
|
||||
var Strategy = resourceClaimTemplateStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
|
||||
|
||||
func (resourceClaimTemplateStrategy) NamespaceScoped() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
|
||||
resourceClaimTemplate := obj.(*resource.ResourceClaimTemplate)
|
||||
return validation.ValidateClaimTemplate(resourceClaimTemplate)
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) Canonicalize(obj runtime.Object) {
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) AllowCreateOnUpdate() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
|
||||
errorList := validation.ValidateClaimTemplate(obj.(*resource.ResourceClaimTemplate))
|
||||
return append(errorList, validation.ValidateClaimTemplateUpdate(obj.(*resource.ResourceClaimTemplate), old.(*resource.ResourceClaimTemplate))...)
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (resourceClaimTemplateStrategy) AllowUnconditionalUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// GetAttrs returns labels and fields of a given object for filtering purposes.
|
||||
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
|
||||
template, ok := obj.(*resource.ResourceClaimTemplate)
|
||||
if !ok {
|
||||
return nil, nil, errors.New("not a resourceclaimtemplate")
|
||||
}
|
||||
return labels.Set(template.Labels), toSelectableFields(template), nil
|
||||
}
|
||||
|
||||
// toSelectableFields returns a field set that represents the object
|
||||
func toSelectableFields(template *resource.ResourceClaimTemplate) fields.Set {
|
||||
fields := generic.ObjectMetaFieldsSet(&template.ObjectMeta, true)
|
||||
return fields
|
||||
}
|
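Note: the file above defines GetAttrs and toSelectableFields but stops short of the matcher that normally consumes them. As a hedged sketch (not part of this commit), this is how such a helper usually looks in other registries, using SelectionPredicate from k8s.io/apiserver; the Match name is illustrative.

package resourceclaimtemplate

import (
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apiserver/pkg/storage"
)

// Match returns a selection predicate for the given label and field
// selectors, wired to the GetAttrs function defined above so that
// field selectors like metadata.name work against stored objects.
func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
    return storage.SelectionPredicate{
        Label:    label,
        Field:    field,
        GetAttrs: GetAttrs,
    }
}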
87
pkg/registry/resource/resourceclaimtemplate/strategy_test.go
Normal file
@ -0,0 +1,87 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourceclaimtemplate
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
)
|
||||
|
||||
var resourceClaimTemplate = &resource.ResourceClaimTemplate{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "valid-claim-template",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: resource.ResourceClaimTemplateSpec{
|
||||
Spec: resource.ResourceClaimSpec{
|
||||
ResourceClassName: "valid-class",
|
||||
AllocationMode: resource.AllocationModeImmediate,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
func TestClaimTemplateStrategy(t *testing.T) {
|
||||
if !Strategy.NamespaceScoped() {
|
||||
t.Errorf("ResourceClaimTemplate must be namespace scoped")
|
||||
}
|
||||
if Strategy.AllowCreateOnUpdate() {
|
||||
t.Errorf("ResourceClaimTemplate should not allow create on update")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaimTemplateStrategyCreate(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
resourceClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||
|
||||
Strategy.PrepareForCreate(ctx, resourceClaimTemplate)
|
||||
errs := Strategy.Validate(ctx, resourceClaimTemplate)
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("unexpected error validating for create %v", errs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClaimTemplateStrategyUpdate(t *testing.T) {
|
||||
t.Run("no-changes-okay", func(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
resourceClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||
newClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||
newClaimTemplate.ResourceVersion = "4"
|
||||
|
||||
Strategy.PrepareForUpdate(ctx, newClaimTemplate, resourceClaimTemplate)
|
||||
errs := Strategy.ValidateUpdate(ctx, newClaimTemplate, resourceClaimTemplate)
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("unexpected validation errors: %v", errs)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("name-change-not-allowed", func(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
resourceClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||
newClaimTemplate := resourceClaimTemplate.DeepCopy()
|
||||
newClaimTemplate.Name = "valid-class-2"
|
||||
newClaimTemplate.ResourceVersion = "4"
|
||||
|
||||
Strategy.PrepareForUpdate(ctx, newClaimTemplate, resourceClaimTemplate)
|
||||
errs := Strategy.ValidateUpdate(ctx, newClaimTemplate, resourceClaimTemplate)
|
||||
if len(errs) == 0 {
|
||||
t.Errorf("expected a validation error")
|
||||
}
|
||||
})
|
||||
}
|
55
pkg/registry/resource/resourceclass/storage/storage.go
Normal file
@ -0,0 +1,55 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
|
||||
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/resource/resourceclass"
|
||||
)
|
||||
|
||||
// REST implements a RESTStorage for ResourceClass.
|
||||
type REST struct {
|
||||
*genericregistry.Store
|
||||
}
|
||||
|
||||
// NewREST returns a RESTStorage object that will work against ResourceClass.
|
||||
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
|
||||
store := &genericregistry.Store{
|
||||
NewFunc: func() runtime.Object { return &resource.ResourceClass{} },
|
||||
NewListFunc: func() runtime.Object { return &resource.ResourceClassList{} },
|
||||
DefaultQualifiedResource: resource.Resource("resourceclasses"),
|
||||
|
||||
CreateStrategy: resourceclass.Strategy,
|
||||
UpdateStrategy: resourceclass.Strategy,
|
||||
DeleteStrategy: resourceclass.Strategy,
|
||||
ReturnDeletedObject: true,
|
||||
|
||||
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
|
||||
}
|
||||
options := &generic.StoreOptions{RESTOptions: optsGetter}
|
||||
if err := store.CompleteWithOptions(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &REST{store}, nil
|
||||
}
|
145
pkg/registry/resource/resourceclass/storage/storage_test.go
Normal file
@ -0,0 +1,145 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
|
||||
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
_ "k8s.io/kubernetes/pkg/apis/resource/install"
|
||||
"k8s.io/kubernetes/pkg/registry/registrytest"
|
||||
)
|
||||
|
||||
func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
|
||||
etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
|
||||
restOptions := generic.RESTOptions{
|
||||
StorageConfig: etcdStorage,
|
||||
Decorator: generic.UndecoratedStorage,
|
||||
DeleteCollectionWorkers: 1,
|
||||
ResourcePrefix: "resourceclasses",
|
||||
}
|
||||
resourceClassStorage, err := NewREST(restOptions)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error from REST storage: %v", err)
|
||||
}
|
||||
return resourceClassStorage, server
|
||||
}
|
||||
|
||||
func validNewClass(name string) *resource.ResourceClass {
|
||||
return &resource.ResourceClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
DriverName: "cdi.example.com",
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreate(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store).ClusterScope()
|
||||
resourceClass := validNewClass("foo")
|
||||
resourceClass.ObjectMeta = metav1.ObjectMeta{GenerateName: "foo"}
|
||||
test.TestCreate(
|
||||
// valid
|
||||
resourceClass,
|
||||
// invalid
|
||||
&resource.ResourceClass{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store).ClusterScope()
|
||||
test.TestUpdate(
|
||||
// valid
|
||||
validNewClass("foo"),
|
||||
// updateFunc
|
||||
func(obj runtime.Object) runtime.Object {
|
||||
object := obj.(*resource.ResourceClass)
|
||||
object.ParametersRef = &resource.ResourceClassParametersReference{Kind: "cdiexample", Name: "some-name"}
|
||||
return object
|
||||
},
|
||||
//invalid update
|
||||
func(obj runtime.Object) runtime.Object {
|
||||
object := obj.(*resource.ResourceClass)
|
||||
object.DriverName = ""
|
||||
return object
|
||||
},
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store).ClusterScope().ReturnDeletedObject()
|
||||
test.TestDelete(validNewClass("foo"))
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store).ClusterScope()
|
||||
test.TestGet(validNewClass("foo"))
|
||||
}
|
||||
|
||||
func TestList(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store).ClusterScope()
|
||||
test.TestList(validNewClass("foo"))
|
||||
}
|
||||
|
||||
func TestWatch(t *testing.T) {
|
||||
storage, server := newStorage(t)
|
||||
defer server.Terminate(t)
|
||||
defer storage.Store.DestroyFunc()
|
||||
test := genericregistrytest.New(t, storage.Store).ClusterScope()
|
||||
test.TestWatch(
|
||||
validNewClass("foo"),
|
||||
// matching labels
|
||||
[]labels.Set{},
|
||||
// not matching labels
|
||||
[]labels.Set{
|
||||
{"foo": "bar"},
|
||||
},
|
||||
// matching fields
|
||||
[]fields.Set{
|
||||
{"metadata.name": "foo"},
|
||||
},
|
||||
// not matching fields
|
||||
[]fields.Set{
|
||||
{"metadata.name": "bar"},
|
||||
},
|
||||
)
|
||||
}
|
75
pkg/registry/resource/resourceclass/strategy.go
Normal file
@ -0,0 +1,75 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourceclass
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
"k8s.io/kubernetes/pkg/apis/resource/validation"
|
||||
)
|
||||
|
||||
// resourceClassStrategy implements behavior for ResourceClass objects
|
||||
type resourceClassStrategy struct {
|
||||
runtime.ObjectTyper
|
||||
names.NameGenerator
|
||||
}
|
||||
|
||||
var Strategy = resourceClassStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
|
||||
|
||||
func (resourceClassStrategy) NamespaceScoped() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
|
||||
resourceClass := obj.(*resource.ResourceClass)
|
||||
return validation.ValidateClass(resourceClass)
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) Canonicalize(obj runtime.Object) {
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) AllowCreateOnUpdate() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
|
||||
errorList := validation.ValidateClass(obj.(*resource.ResourceClass))
|
||||
return append(errorList, validation.ValidateClassUpdate(obj.(*resource.ResourceClass), old.(*resource.ResourceClass))...)
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (resourceClassStrategy) AllowUnconditionalUpdate() bool {
|
||||
return true
|
||||
}
|
81
pkg/registry/resource/resourceclass/strategy_test.go
Normal file
@ -0,0 +1,81 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package resourceclass
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
)
|
||||
|
||||
var resourceClass = &resource.ResourceClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "valid-class",
|
||||
},
|
||||
DriverName: "resource-driver.example.com",
|
||||
}
|
||||
|
||||
func TestClassStrategy(t *testing.T) {
|
||||
if Strategy.NamespaceScoped() {
|
||||
t.Errorf("ResourceClass must not be namespace scoped")
|
||||
}
|
||||
if Strategy.AllowCreateOnUpdate() {
|
||||
t.Errorf("ResourceClass should not allow create on update")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClassStrategyCreate(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
resourceClass := resourceClass.DeepCopy()
|
||||
|
||||
Strategy.PrepareForCreate(ctx, resourceClass)
|
||||
errs := Strategy.Validate(ctx, resourceClass)
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("unexpected error validating for create %v", errs)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClassStrategyUpdate(t *testing.T) {
|
||||
t.Run("no-changes-okay", func(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
resourceClass := resourceClass.DeepCopy()
|
||||
newClass := resourceClass.DeepCopy()
|
||||
newClass.ResourceVersion = "4"
|
||||
|
||||
Strategy.PrepareForUpdate(ctx, newClass, resourceClass)
|
||||
errs := Strategy.ValidateUpdate(ctx, newClass, resourceClass)
|
||||
if len(errs) != 0 {
|
||||
t.Errorf("unexpected validation errors: %v", errs)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("name-change-not-allowed", func(t *testing.T) {
|
||||
ctx := genericapirequest.NewDefaultContext()
|
||||
resourceClass := resourceClass.DeepCopy()
|
||||
newClass := resourceClass.DeepCopy()
|
||||
newClass.Name = "valid-class-2"
|
||||
newClass.ResourceVersion = "4"
|
||||
|
||||
Strategy.PrepareForUpdate(ctx, newClass, resourceClass)
|
||||
errs := Strategy.ValidateUpdate(ctx, newClass, resourceClass)
|
||||
if len(errs) == 0 {
|
||||
t.Errorf("expected a validation error")
|
||||
}
|
||||
})
|
||||
}
|
91
pkg/registry/resource/rest/storage_resource.go
Normal file
@ -0,0 +1,91 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rest
|
||||
|
||||
import (
|
||||
resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
serverstorage "k8s.io/apiserver/pkg/server/storage"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/resource"
|
||||
podschedulingstore "k8s.io/kubernetes/pkg/registry/resource/podscheduling/storage"
|
||||
resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage"
|
||||
resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage"
|
||||
resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage"
|
||||
)
|
||||
|
||||
type RESTStorageProvider struct{}
|
||||
|
||||
func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, error) {
|
||||
apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(resource.GroupName, legacyscheme.Scheme, legacyscheme.ParameterCodec, legacyscheme.Codecs)
|
||||
// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities.
|
||||
// TODO refactor the plumbing to provide the information in the APIGroupInfo
|
||||
|
||||
if storageMap, err := p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
|
||||
return genericapiserver.APIGroupInfo{}, err
|
||||
} else if len(storageMap) > 0 {
|
||||
apiGroupInfo.VersionedResourcesStorageMap[resourcev1alpha1.SchemeGroupVersion.Version] = storageMap
|
||||
}
|
||||
|
||||
return apiGroupInfo, nil
|
||||
}
|
||||
|
||||
func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) {
|
||||
storage := map[string]rest.Storage{}
|
||||
|
||||
if resource := "resourceclasses"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||
resourceClassStorage, err := resourceclassstore.NewREST(restOptionsGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storage[resource] = resourceClassStorage
|
||||
}
|
||||
|
||||
if resource := "resourceclaims"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||
resourceClaimStorage, resourceClaimStatusStorage, err := resourceclaimstore.NewREST(restOptionsGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storage[resource] = resourceClaimStorage
|
||||
storage[resource+"/status"] = resourceClaimStatusStorage
|
||||
}
|
||||
|
||||
if resource := "resourceclaimtemplates"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||
resourceClaimTemplateStorage, err := resourceclaimtemplatestore.NewREST(restOptionsGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storage[resource] = resourceClaimTemplateStorage
|
||||
}
|
||||
|
||||
if resource := "podschedulings"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||
podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingstore.NewREST(restOptionsGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
storage[resource] = podSchedulingStorage
|
||||
storage[resource+"/status"] = podSchedulingStatusStorage
|
||||
}
|
||||
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
func (p RESTStorageProvider) GroupName() string {
|
||||
return resource.GroupName
|
||||
}
|
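For readers who want to see what the ResourceEnabled checks above react to, here is a small, hedged sketch (not part of this commit) that builds a resource config enabling the whole resource.k8s.io/v1alpha1 group and then queries each resource. The method names EnableVersions and ResourceEnabled are the ones I recall from k8s.io/apiserver's ResourceConfig and should be treated as assumptions.

package main

import (
    "fmt"

    resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
    serverstorage "k8s.io/apiserver/pkg/server/storage"
)

func main() {
    // Enabling the group/version enables every resource in it unless a
    // more specific override disables it.
    cfg := serverstorage.NewResourceConfig()
    cfg.EnableVersions(resourcev1alpha1.SchemeGroupVersion)

    for _, name := range []string{"resourceclasses", "resourceclaims", "resourceclaimtemplates", "podschedulings"} {
        gvr := resourcev1alpha1.SchemeGroupVersion.WithResource(name)
        fmt.Printf("%s enabled: %v\n", gvr.Resource, cfg.ResourceEnabled(gvr))
    }
}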
6
staging/src/k8s.io/api/resource/OWNERS
Normal file
@ -0,0 +1,6 @@
|
||||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
reviewers:
|
||||
- bart0sh
|
||||
- klueska
|
||||
- pohly
|
24
staging/src/k8s.io/api/resource/v1alpha1/doc.go
Normal file
@ -0,0 +1,24 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:protobuf-gen=package
|
||||
|
||||
// +groupName=resource.k8s.io
|
||||
|
||||
// Package v1alpha1 is the v1alpha1 version of the resource API.
|
||||
package v1alpha1 // import "k8s.io/api/resource/v1alpha1"
|
63
staging/src/k8s.io/api/resource/v1alpha1/register.go
Normal file
@ -0,0 +1,63 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name use in this package
|
||||
const GroupName = "resource.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
|
||||
AddToScheme = SchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
// Adds the list of known types to the given scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&ResourceClass{},
|
||||
&ResourceClassList{},
|
||||
&ResourceClaim{},
|
||||
&ResourceClaimList{},
|
||||
&ResourceClaimTemplate{},
|
||||
&ResourceClaimTemplateList{},
|
||||
&PodScheduling{},
|
||||
&PodSchedulingList{},
|
||||
)
|
||||
|
||||
// Add common types
|
||||
scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{})
|
||||
|
||||
// Add the watch version that applies
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
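A minimal sketch (not part of this commit) of how the SchemeBuilder above can be consumed outside the apiserver, assuming only the standard apimachinery codec helpers; the object name "example-class" is made up for illustration.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/serializer"

    resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
)

func main() {
    // Register the v1alpha1 types into a fresh scheme and build a codec for
    // the resource.k8s.io/v1alpha1 group version.
    scheme := runtime.NewScheme()
    if err := resourcev1alpha1.AddToScheme(scheme); err != nil {
        panic(err)
    }
    codecs := serializer.NewCodecFactory(scheme)
    codec := codecs.LegacyCodec(resourcev1alpha1.SchemeGroupVersion)

    class := &resourcev1alpha1.ResourceClass{
        ObjectMeta: metav1.ObjectMeta{Name: "example-class"},
        DriverName: "resource-driver.example.com",
    }

    // Encoding fills in apiVersion and kind from the scheme registration.
    data, err := runtime.Encode(codec, class)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(data))
}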
429
staging/src/k8s.io/api/resource/v1alpha1/types.go
Normal file
@ -0,0 +1,429 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// ResourceClaim describes which resources are needed by a resource consumer.
|
||||
// Its status tracks whether the resource has been allocated and what the
|
||||
// resulting attributes are.
|
||||
//
|
||||
// This is an alpha type and requires enabling the DynamicResourceAllocation
|
||||
// feature gate.
|
||||
type ResourceClaim struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec describes the desired attributes of a resource that then needs
|
||||
// to be allocated. It can only be set once when creating the
|
||||
// ResourceClaim.
|
||||
Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
|
||||
|
||||
// Status describes whether the resource is available and with which
|
||||
// attributes.
|
||||
// +optional
|
||||
Status ResourceClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// ResourceClaimSpec defines how a resource is to be allocated.
|
||||
type ResourceClaimSpec struct {
|
||||
// ResourceClassName references the driver and additional parameters
|
||||
// via the name of a ResourceClass that was created as part of the
|
||||
// driver deployment.
|
||||
ResourceClassName string `json:"resourceClassName" protobuf:"bytes,1,name=resourceClassName"`
|
||||
|
||||
// ParametersRef references a separate object with arbitrary parameters
|
||||
// that will be used by the driver when allocating a resource for the
|
||||
// claim.
|
||||
//
|
||||
// The object must be in the same namespace as the ResourceClaim.
|
||||
// +optional
|
||||
ParametersRef *ResourceClaimParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,2,opt,name=parametersRef"`
|
||||
|
||||
// Allocation can start immediately or when a Pod wants to use the
|
||||
// resource. "WaitForFirstConsumer" is the default.
|
||||
// +optional
|
||||
AllocationMode AllocationMode `json:"allocationMode,omitempty" protobuf:"bytes,3,opt,name=allocationMode"`
|
||||
}
|
||||
|
||||
// AllocationMode describes whether a ResourceClaim gets allocated immediately
|
||||
// when it gets created (AllocationModeImmediate) or whether allocation is
|
||||
// delayed until it is needed for a Pod
|
||||
// (AllocationModeWaitForFirstConsumer). Other modes might get added in the
|
||||
// future.
|
||||
type AllocationMode string
|
||||
|
||||
const (
|
||||
// When a ResourceClaim has AllocationModeWaitForFirstConsumer, allocation is
|
||||
// delayed until a Pod gets scheduled that needs the ResourceClaim. The
|
||||
// scheduler will consider all resource requirements of that Pod and
|
||||
// trigger allocation for a node that fits the Pod.
|
||||
AllocationModeWaitForFirstConsumer AllocationMode = "WaitForFirstConsumer"
|
||||
|
||||
// When a ResourceClaim has AllocationModeImmediate, allocation starts
|
||||
// as soon as the ResourceClaim gets created. This is done without
|
||||
// considering the needs of Pods that will use the ResourceClaim
|
||||
// because those Pods are not known yet.
|
||||
AllocationModeImmediate AllocationMode = "Immediate"
|
||||
)
|
||||
|
||||
// ResourceClaimStatus tracks whether the resource has been allocated and what
|
||||
// the resulting attributes are.
|
||||
type ResourceClaimStatus struct {
|
||||
// DriverName is a copy of the driver name from the ResourceClass at
|
||||
// the time when allocation started.
|
||||
// +optional
|
||||
DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
|
||||
|
||||
// Allocation is set by the resource driver once a resource has been
|
||||
// allocated successfully. If this is not specified, the resource is
|
||||
// not yet allocated.
|
||||
// +optional
|
||||
Allocation *AllocationResult `json:"allocation,omitempty" protobuf:"bytes,2,opt,name=allocation"`
|
||||
|
||||
// ReservedFor indicates which entities are currently allowed to use
|
||||
// the claim. A Pod which references a ResourceClaim which is not
|
||||
// reserved for that Pod will not be started.
|
||||
//
|
||||
// There can be at most 32 such reservations. This may get increased in
|
||||
// the future, but not reduced.
|
||||
//
|
||||
// +listType=set
|
||||
// +optional
|
||||
ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,3,opt,name=reservedFor"`
|
||||
|
||||
// DeallocationRequested indicates that a ResourceClaim is to be
|
||||
// deallocated.
|
||||
//
|
||||
// The driver then must deallocate this claim and reset the field
|
||||
// together with clearing the Allocation field.
|
||||
//
|
||||
// While DeallocationRequested is set, no new consumers may be added to
|
||||
// ReservedFor.
|
||||
// +optional
|
||||
DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"varint,4,opt,name=deallocationRequested"`
|
||||
}
|
||||
|
||||
// ReservedForMaxSize is the maximum number of entries in
|
||||
// claim.status.reservedFor.
|
||||
const ResourceClaimReservedForMaxSize = 32
|
||||
|
||||
// AllocationResult contains attributed of an allocated resource.
|
||||
type AllocationResult struct {
|
||||
// ResourceHandle contains arbitrary data returned by the driver after a
|
||||
// successful allocation. This is opaque for
|
||||
// Kubernetes. Driver documentation may explain to users how to
|
||||
// interpret this data if needed.
|
||||
//
|
||||
// The maximum size of this field is 16KiB. This may get
|
||||
// increased in the future, but not reduced.
|
||||
// +optional
|
||||
ResourceHandle string `json:"resourceHandle,omitempty" protobuf:"bytes,1,opt,name=resourceHandle"`
|
||||
|
||||
// This field will get set by the resource driver after it has
|
||||
// allocated the resource driver to inform the scheduler where it can
|
||||
// schedule Pods using the ResourceClaim.
|
||||
//
|
||||
// Setting this field is optional. If null, the resource is available
|
||||
// everywhere.
|
||||
// +optional
|
||||
AvailableOnNodes *v1.NodeSelector `json:"availableOnNodes,omitempty" protobuf:"bytes,2,opt,name=availableOnNodes"`
|
||||
|
||||
// Shareable determines whether the resource supports more
|
||||
// than one consumer at a time.
|
||||
// +optional
|
||||
Shareable bool `json:"shareable,omitempty" protobuf:"varint,3,opt,name=shareable"`
|
||||
}
|
||||
|
||||
// ResourceHandleMaxSize is the maximum size of allocation.resourceHandle.
|
||||
const ResourceHandleMaxSize = 16 * 1024
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// ResourceClaimList is a collection of claims.
|
||||
type ResourceClaimList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is the list of resource claims.
|
||||
Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// PodScheduling objects hold information that is needed to schedule
|
||||
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
|
||||
// mode.
|
||||
//
|
||||
// This is an alpha type and requires enabling the DynamicResourceAllocation
|
||||
// feature gate.
|
||||
type PodScheduling struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec describes where resources for the Pod are needed.
|
||||
Spec PodSchedulingSpec `json:"spec" protobuf:"bytes,2,name=spec"`
|
||||
|
||||
// Status describes where resources for the Pod can be allocated.
|
||||
// +optional
|
||||
Status PodSchedulingStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// PodSchedulingSpec describes where resources for the Pod are needed.
|
||||
type PodSchedulingSpec struct {
|
||||
// SelectedNode is the node for which allocation of ResourceClaims that
|
||||
// are referenced by the Pod and that use "WaitForFirstConsumer"
|
||||
// allocation is to be attempted.
|
||||
// +optional
|
||||
SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"`
|
||||
|
||||
// PotentialNodes lists nodes where the Pod might be able to run.
|
||||
//
|
||||
// The size of this field is limited to 128. This is large enough for
|
||||
// many clusters. Larger clusters may need more attempts to find a node
|
||||
// that suits all pending resources. This may get increased in the
|
||||
// future, but not reduced.
|
||||
//
|
||||
// +listType=set
|
||||
// +optional
|
||||
PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"`
|
||||
}
|
||||
|
||||
// PodSchedulingStatus describes where resources for the Pod can be allocated.
|
||||
type PodSchedulingStatus struct {
|
||||
// ResourceClaims describes resource availability for each
|
||||
// pod.spec.resourceClaim entry where the corresponding ResourceClaim
|
||||
// uses "WaitForFirstConsumer" allocation mode.
|
||||
//
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +optional
|
||||
ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"`
|
||||
|
||||
// If there ever is a need to support other kinds of resources
|
||||
// than ResourceClaim, then new fields could get added here
|
||||
// for those other resources.
|
||||
}
|
||||
|
||||
// ResourceClaimSchedulingStatus contains information about one particular
|
||||
// ResourceClaim with "WaitForFirstConsumer" allocation mode.
|
||||
type ResourceClaimSchedulingStatus struct {
|
||||
// Name matches the pod.spec.resourceClaims[*].Name field.
|
||||
// +optional
|
||||
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
|
||||
|
||||
// UnsuitableNodes lists nodes that the ResourceClaim cannot be
|
||||
// allocated for.
|
||||
//
|
||||
// The size of this field is limited to 128, the same as for
|
||||
// PodSchedulingSpec.PotentialNodes. This may get increased in the
|
||||
// future, but not reduced.
|
||||
//
|
||||
// +listType=set
|
||||
// +optional
|
||||
UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"`
|
||||
}
|
||||
|
||||
// PodSchedulingNodeListMaxSize defines the maximum number of entries in the
|
||||
// node lists that are stored in PodScheduling objects. This limit is part
|
||||
// of the API.
|
||||
const PodSchedulingNodeListMaxSize = 128
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// PodSchedulingList is a collection of Pod scheduling objects.
|
||||
type PodSchedulingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is the list of PodScheduling objects.
|
||||
Items []PodScheduling `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// ResourceClass is used by administrators to influence how resources
|
||||
// are allocated.
|
||||
//
|
||||
// This is an alpha type and requires enabling the DynamicResourceAllocation
|
||||
// feature gate.
|
||||
type ResourceClass struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// DriverName defines the name of the dynamic resource driver that is
|
||||
// used for allocation of a ResourceClaim that uses this class.
|
||||
//
|
||||
// Resource drivers have a unique name in forward domain order
|
||||
// (acme.example.com).
|
||||
DriverName string `json:"driverName" protobuf:"bytes,2,name=driverName"`
|
||||
|
||||
// ParametersRef references an arbitrary separate object that may hold
|
||||
// parameters that will be used by the driver when allocating a
|
||||
// resource that uses this class. A dynamic resource driver can
|
||||
// distinguish between parameters stored here and and those stored in
|
||||
// ResourceClaimSpec.
|
||||
// +optional
|
||||
ParametersRef *ResourceClassParametersReference `json:"parametersRef,omitempty" protobuf:"bytes,3,opt,name=parametersRef"`
|
||||
|
||||
// Only nodes matching the selector will be considered by the scheduler
|
||||
// when trying to find a Node that fits a Pod when that Pod uses
|
||||
// a ResourceClaim that has not been allocated yet.
|
||||
//
|
||||
// Setting this field is optional. If null, all nodes are candidates.
|
||||
// +optional
|
||||
SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// ResourceClassList is a collection of classes.
|
||||
type ResourceClassList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is the list of resource classes.
|
||||
Items []ResourceClass `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// ResourceClassParametersReference contains enough information to let you
|
||||
// locate the parameters for a ResourceClass.
|
||||
type ResourceClassParametersReference struct {
|
||||
// APIGroup is the group for the resource being referenced. It is
|
||||
// empty for the core API. This matches the group in the APIVersion
|
||||
// that is used when creating the resources.
|
||||
// +optional
|
||||
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
|
||||
// Kind is the type of resource being referenced. This is the same
|
||||
// value as in the parameter object's metadata.
|
||||
Kind string `json:"kind" protobuf:"bytes,2,name=kind"`
|
||||
// Name is the name of resource being referenced.
|
||||
Name string `json:"name" protobuf:"bytes,3,name=name"`
|
||||
// Namespace that contains the referenced resource. Must be empty
|
||||
// for cluster-scoped resources and non-empty for namespaced
|
||||
// resources.
|
||||
// +optional
|
||||
Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"`
|
||||
}
|
||||
|
||||
// ResourceClaimParametersReference contains enough information to let you
|
||||
// locate the parameters for a ResourceClaim. The object must be in the same
|
||||
// namespace as the ResourceClaim.
|
||||
type ResourceClaimParametersReference struct {
|
||||
// APIGroup is the group for the resource being referenced. It is
|
||||
// empty for the core API. This matches the group in the APIVersion
|
||||
// that is used when creating the resources.
|
||||
// +optional
|
||||
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
|
||||
// Kind is the type of resource being referenced. This is the same
|
||||
// value as in the parameter object's metadata, for example "ConfigMap".
|
||||
Kind string `json:"kind" protobuf:"bytes,2,name=kind"`
|
||||
// Name is the name of resource being referenced.
|
||||
Name string `json:"name" protobuf:"bytes,3,name=name"`
|
||||
}
|
||||
|
||||
// ResourceClaimConsumerReference contains enough information to let you
|
||||
// locate the consumer of a ResourceClaim. The user must be a resource in the same
|
||||
// namespace as the ResourceClaim.
|
||||
type ResourceClaimConsumerReference struct {
|
||||
// APIGroup is the group for the resource being referenced. It is
|
||||
// empty for the core API. This matches the group in the APIVersion
|
||||
// that is used when creating the resources.
|
||||
// +optional
|
||||
APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,1,opt,name=apiGroup"`
|
||||
// Resource is the type of resource being referenced, for example "pods".
|
||||
Resource string `json:"resource" protobuf:"bytes,3,name=resource"`
|
||||
// Name is the name of resource being referenced.
|
||||
Name string `json:"name" protobuf:"bytes,4,name=name"`
|
||||
// UID identifies exactly one incarnation of the resource.
|
||||
UID types.UID `json:"uid" protobuf:"bytes,5,name=uid"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// ResourceClaimTemplate is used to produce ResourceClaim objects.
|
||||
type ResourceClaimTemplate struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Describes the ResourceClaim that is to be generated.
|
||||
//
|
||||
// This field is immutable. A ResourceClaim will get created by the
|
||||
// control plane for a Pod when needed and then not get updated
|
||||
// anymore.
|
||||
Spec ResourceClaimTemplateSpec `json:"spec" protobuf:"bytes,2,name=spec"`
|
||||
}
|
||||
|
||||
// ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.
|
||||
type ResourceClaimTemplateSpec struct {
|
||||
// ObjectMeta may contain labels and annotations that will be copied into the PVC
|
||||
// when creating it. No other fields are allowed and will be rejected during
|
||||
// validation.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec for the ResourceClaim. The entire content is copied unchanged
|
||||
// into the ResourceClaim that gets created from this template. The
|
||||
// same fields as in a ResourceClaim are also valid here.
|
||||
Spec ResourceClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.26
|
||||
|
||||
// ResourceClaimTemplateList is a collection of claim templates.
|
||||
type ResourceClaimTemplateList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Items is the list of resource claim templates.
|
||||
Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
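To make the relationships between the types above concrete, here is a hedged example that builds a ResourceClass, a ResourceClaim and a plausible post-allocation status in Go. All names ("gpu-class", "gpu.example.com", "my-pod") are invented for illustration; the exact values a real driver writes will differ.

package main

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"

    resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
)

func exampleObjects() (*resourcev1alpha1.ResourceClass, *resourcev1alpha1.ResourceClaim) {
    // A cluster-scoped class that names the driver responsible for allocation.
    class := &resourcev1alpha1.ResourceClass{
        ObjectMeta: metav1.ObjectMeta{Name: "gpu-class"},
        DriverName: "gpu.example.com",
    }

    // A namespaced claim referencing that class; leaving AllocationMode
    // empty would also mean "WaitForFirstConsumer" per the field comment.
    claim := &resourcev1alpha1.ResourceClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "my-claim", Namespace: "default"},
        Spec: resourcev1alpha1.ResourceClaimSpec{
            ResourceClassName: "gpu-class",
            AllocationMode:    resourcev1alpha1.AllocationModeWaitForFirstConsumer,
        },
    }

    // After allocation, the driver fills in the status and the consuming Pod
    // gets recorded in ReservedFor.
    claim.Status = resourcev1alpha1.ResourceClaimStatus{
        DriverName: "gpu.example.com",
        Allocation: &resourcev1alpha1.AllocationResult{Shareable: false},
        ReservedFor: []resourcev1alpha1.ResourceClaimConsumerReference{
            {Resource: "pods", Name: "my-pod", UID: types.UID("1234")},
        },
    }
    return class, claim
}

func main() { _, _ = exampleObjects() }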
@ -66,6 +66,7 @@ import (
    rbacv1 "k8s.io/api/rbac/v1"
    rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
    rbacv1beta1 "k8s.io/api/rbac/v1beta1"
    resourcev1alpha1 "k8s.io/api/resource/v1alpha1"
    schedulingv1 "k8s.io/api/scheduling/v1"
    schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
    schedulingv1beta1 "k8s.io/api/scheduling/v1beta1"
@ -128,6 +129,7 @@ var groups = []runtime.SchemeBuilder{
    rbacv1alpha1.SchemeBuilder,
    rbacv1beta1.SchemeBuilder,
    rbacv1.SchemeBuilder,
    resourcev1alpha1.SchemeBuilder,
    schedulingv1alpha1.SchemeBuilder,
    schedulingv1beta1.SchemeBuilder,
    schedulingv1.SchemeBuilder,
@ -64,6 +64,8 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
    gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`,
    gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
    gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
    gvr("resource.k8s.io", "v1alpha1", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulings.
    gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
    gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
}

@ -84,6 +86,10 @@ var noConflicts = map[string]struct{}{
    // namespaces only have a spec.finalizers field which is also skipped,
    // thus it will never have a conflict.
    "namespaces": {},
    // podschedulings.status only has a list which contains items with a list,
    // therefore apply works because it simply merges either the outer or
    // the inner list.
    "podschedulings": {},
}

var image2 = image.GetE2EImage(image.Etcd)
@ -140,6 +146,10 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
    gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`,
    gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
    gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
    gvr("resource.k8s.io", "v1alpha1", "podschedulings"): `{"spec": {"selectedNode": "node2name"}}`,
    gvr("resource.k8s.io", "v1alpha1", "resourceclasses"): `{"driverName": "other.example.com"}`,
    gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
    gvr("resource.k8s.io", "v1alpha1", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,
    gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{}`,
}

@ -54,6 +54,8 @@ var statusData = map[schema.GroupVersionResource]string{
    gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`,
    gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
    gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
    gvr("resource.k8s.io", "v1alpha1", "podschedulings"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
    gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
    gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
}

@ -459,6 +459,25 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
    },
    // --

    // k8s.io/kubernetes/pkg/apis/resource/v1alpha1
    gvr("resource.k8s.io", "v1alpha1", "resourceclasses"): {
        Stub:             `{"metadata": {"name": "class1name"}, "driverName": "example.com"}`,
        ExpectedEtcdPath: "/registry/resourceclasses/class1name",
    },
    gvr("resource.k8s.io", "v1alpha1", "resourceclaims"): {
        Stub:             `{"metadata": {"name": "claim1name"}, "spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}`,
        ExpectedEtcdPath: "/registry/resourceclaims/" + namespace + "/claim1name",
    },
    gvr("resource.k8s.io", "v1alpha1", "resourceclaimtemplates"): {
        Stub:             `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`,
        ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name",
    },
    gvr("resource.k8s.io", "v1alpha1", "podschedulings"): {
        Stub:             `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`,
        ExpectedEtcdPath: "/registry/podschedulings/" + namespace + "/pod1name",
    },
    // --

    // k8s.io/apiserver/pkg/apis/apiserverinternal/v1alpha1
    gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): {
        Stub: `{"metadata":{"name":"sv1.test"},"spec":{}}`,