Merge pull request #125470 from everpeace/kep-3619-SupplementalGroupsPolicy-e2e
KEP-3619: Add NodeStatus.Features.SupplementalGroupsPolicy API and e2e
api/openapi-spec/swagger.json (generated, 16 lines changed)
							| @@ -8243,6 +8243,16 @@ | ||||
|       }, | ||||
|       "type": "object" | ||||
|     }, | ||||
|     "io.k8s.api.core.v1.NodeFeatures": { | ||||
|       "description": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", | ||||
|       "properties": { | ||||
|         "supplementalGroupsPolicy": { | ||||
|           "description": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", | ||||
|           "type": "boolean" | ||||
|         } | ||||
|       }, | ||||
|       "type": "object" | ||||
|     }, | ||||
|     "io.k8s.api.core.v1.NodeList": { | ||||
|       "description": "NodeList is the whole list of all Nodes which have been registered with master.", | ||||
|       "properties": { | ||||
| @@ -8293,7 +8303,7 @@ | ||||
|       "type": "object" | ||||
|     }, | ||||
|     "io.k8s.api.core.v1.NodeRuntimeHandlerFeatures": { | ||||
|       "description": "NodeRuntimeHandlerFeatures is a set of runtime features.", | ||||
|       "description": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", | ||||
|       "properties": { | ||||
|         "recursiveReadOnlyMounts": { | ||||
|           "description": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", | ||||
| @@ -8467,6 +8477,10 @@ | ||||
|           "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints", | ||||
|           "description": "Endpoints of daemons running on the Node." | ||||
|         }, | ||||
|         "features": { | ||||
|           "$ref": "#/definitions/io.k8s.api.core.v1.NodeFeatures", | ||||
|           "description": "Features describes the set of features implemented by the CRI implementation." | ||||
|         }, | ||||
|         "images": { | ||||
|           "description": "List of container images on this node", | ||||
|           "items": { | ||||
|   | ||||
| @@ -3741,6 +3741,16 @@ | ||||
|         }, | ||||
|         "type": "object" | ||||
|       }, | ||||
|       "io.k8s.api.core.v1.NodeFeatures": { | ||||
|         "description": "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", | ||||
|         "properties": { | ||||
|           "supplementalGroupsPolicy": { | ||||
|             "description": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", | ||||
|             "type": "boolean" | ||||
|           } | ||||
|         }, | ||||
|         "type": "object" | ||||
|       }, | ||||
|       "io.k8s.api.core.v1.NodeList": { | ||||
|         "description": "NodeList is the whole list of all Nodes which have been registered with master.", | ||||
|         "properties": { | ||||
| @@ -3806,7 +3816,7 @@ | ||||
|         "type": "object" | ||||
|       }, | ||||
|       "io.k8s.api.core.v1.NodeRuntimeHandlerFeatures": { | ||||
|         "description": "NodeRuntimeHandlerFeatures is a set of runtime features.", | ||||
|         "description": "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", | ||||
|         "properties": { | ||||
|           "recursiveReadOnlyMounts": { | ||||
|             "description": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", | ||||
| @@ -4027,6 +4037,14 @@ | ||||
|             "default": {}, | ||||
|             "description": "Endpoints of daemons running on the Node." | ||||
|           }, | ||||
|           "features": { | ||||
|             "allOf": [ | ||||
|               { | ||||
|                 "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeFeatures" | ||||
|               } | ||||
|             ], | ||||
|             "description": "Features describes the set of features implemented by the CRI implementation." | ||||
|           }, | ||||
|           "images": { | ||||
|             "description": "List of container images on this node", | ||||
|             "items": { | ||||
|   | ||||
| @@ -4892,7 +4892,7 @@ type NodeDaemonEndpoints struct { | ||||
| 	KubeletEndpoint DaemonEndpoint | ||||
| } | ||||
|  | ||||
| // NodeRuntimeHandlerFeatures is a set of runtime features. | ||||
| // NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. | ||||
| type NodeRuntimeHandlerFeatures struct { | ||||
| 	// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. | ||||
| 	// +featureGate=RecursiveReadOnlyMounts | ||||
| @@ -4915,6 +4915,15 @@ type NodeRuntimeHandler struct { | ||||
| 	Features *NodeRuntimeHandlerFeatures | ||||
| } | ||||
|  | ||||
| // NodeFeatures describes the set of features implemented by the CRI implementation. | ||||
| // The features contained in the NodeFeatures should depend only on the cri implementation | ||||
| // independent of runtime handlers. | ||||
| type NodeFeatures struct { | ||||
| 	// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. | ||||
| 	// +optional | ||||
| 	SupplementalGroupsPolicy *bool | ||||
| } | ||||
|  | ||||
| // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. | ||||
| type NodeSystemInfo struct { | ||||
| 	// MachineID reported by the node. For unique machine identification | ||||
| @@ -5030,6 +5039,10 @@ type NodeStatus struct { | ||||
| 	// +featureGate=UserNamespacesSupport | ||||
| 	// +optional | ||||
| 	RuntimeHandlers []NodeRuntimeHandler | ||||
| 	// Features describes the set of features implemented by the CRI implementation. | ||||
| 	// +featureGate=SupplementalGroupsPolicy | ||||
| 	// +optional | ||||
| 	Features *NodeFeatures | ||||
| } | ||||
|  | ||||
| // UniqueVolumeName defines the name of attached volume | ||||
|   | ||||
pkg/apis/core/v1/zz_generated.conversion.go (generated, 32 lines changed)
							| @@ -1062,6 +1062,16 @@ func RegisterConversions(s *runtime.Scheme) error { | ||||
| 	}); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if err := s.AddGeneratedConversionFunc((*v1.NodeFeatures)(nil), (*core.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { | ||||
| 		return Convert_v1_NodeFeatures_To_core_NodeFeatures(a.(*v1.NodeFeatures), b.(*core.NodeFeatures), scope) | ||||
| 	}); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if err := s.AddGeneratedConversionFunc((*core.NodeFeatures)(nil), (*v1.NodeFeatures)(nil), func(a, b interface{}, scope conversion.Scope) error { | ||||
| 		return Convert_core_NodeFeatures_To_v1_NodeFeatures(a.(*core.NodeFeatures), b.(*v1.NodeFeatures), scope) | ||||
| 	}); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	if err := s.AddGeneratedConversionFunc((*v1.NodeList)(nil), (*core.NodeList)(nil), func(a, b interface{}, scope conversion.Scope) error { | ||||
| 		return Convert_v1_NodeList_To_core_NodeList(a.(*v1.NodeList), b.(*core.NodeList), scope) | ||||
| 	}); err != nil { | ||||
| @@ -5067,6 +5077,26 @@ func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDae | ||||
| 	return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) | ||||
| } | ||||
|  | ||||
| func autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in *v1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error { | ||||
| 	out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy)) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Convert_v1_NodeFeatures_To_core_NodeFeatures is an autogenerated conversion function. | ||||
| func Convert_v1_NodeFeatures_To_core_NodeFeatures(in *v1.NodeFeatures, out *core.NodeFeatures, s conversion.Scope) error { | ||||
| 	return autoConvert_v1_NodeFeatures_To_core_NodeFeatures(in, out, s) | ||||
| } | ||||
|  | ||||
| func autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *v1.NodeFeatures, s conversion.Scope) error { | ||||
| 	out.SupplementalGroupsPolicy = (*bool)(unsafe.Pointer(in.SupplementalGroupsPolicy)) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Convert_core_NodeFeatures_To_v1_NodeFeatures is an autogenerated conversion function. | ||||
| func Convert_core_NodeFeatures_To_v1_NodeFeatures(in *core.NodeFeatures, out *v1.NodeFeatures, s conversion.Scope) error { | ||||
| 	return autoConvert_core_NodeFeatures_To_v1_NodeFeatures(in, out, s) | ||||
| } | ||||
|  | ||||
| func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { | ||||
| 	out.ListMeta = in.ListMeta | ||||
| 	if in.Items != nil { | ||||
| @@ -5295,6 +5325,7 @@ func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.N | ||||
| 	out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) | ||||
| 	out.Config = (*core.NodeConfigStatus)(unsafe.Pointer(in.Config)) | ||||
| 	out.RuntimeHandlers = *(*[]core.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers)) | ||||
| 	out.Features = (*core.NodeFeatures)(unsafe.Pointer(in.Features)) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| @@ -5320,6 +5351,7 @@ func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.N | ||||
| 	out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) | ||||
| 	out.Config = (*v1.NodeConfigStatus)(unsafe.Pointer(in.Config)) | ||||
| 	out.RuntimeHandlers = *(*[]v1.NodeRuntimeHandler)(unsafe.Pointer(&in.RuntimeHandlers)) | ||||
| 	out.Features = (*v1.NodeFeatures)(unsafe.Pointer(in.Features)) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
|   | ||||
pkg/apis/core/zz_generated.deepcopy.go (generated, 26 lines changed)
							| @@ -2718,6 +2718,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | ||||
| func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) { | ||||
| 	*out = *in | ||||
| 	if in.SupplementalGroupsPolicy != nil { | ||||
| 		in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy | ||||
| 		*out = new(bool) | ||||
| 		**out = **in | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures. | ||||
| func (in *NodeFeatures) DeepCopy() *NodeFeatures { | ||||
| 	if in == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	out := new(NodeFeatures) | ||||
| 	in.DeepCopyInto(out) | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | ||||
| func (in *NodeList) DeepCopyInto(out *NodeList) { | ||||
| 	*out = *in | ||||
| @@ -2990,6 +3011,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { | ||||
| 			(*in)[i].DeepCopyInto(&(*out)[i]) | ||||
| 		} | ||||
| 	} | ||||
| 	if in.Features != nil { | ||||
| 		in, out := &in.Features, &out.Features | ||||
| 		*out = new(NodeFeatures) | ||||
| 		(*in).DeepCopyInto(*out) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
|   | ||||
pkg/generated/openapi/zz_generated.openapi.go (generated, 31 lines changed)
							| @@ -479,6 +479,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA | ||||
| 		"k8s.io/api/core/v1.NodeConfigSource":                                                                   schema_k8sio_api_core_v1_NodeConfigSource(ref), | ||||
| 		"k8s.io/api/core/v1.NodeConfigStatus":                                                                   schema_k8sio_api_core_v1_NodeConfigStatus(ref), | ||||
| 		"k8s.io/api/core/v1.NodeDaemonEndpoints":                                                                schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref), | ||||
| 		"k8s.io/api/core/v1.NodeFeatures":                                                                       schema_k8sio_api_core_v1_NodeFeatures(ref), | ||||
| 		"k8s.io/api/core/v1.NodeList":                                                                           schema_k8sio_api_core_v1_NodeList(ref), | ||||
| 		"k8s.io/api/core/v1.NodeProxyOptions":                                                                   schema_k8sio_api_core_v1_NodeProxyOptions(ref), | ||||
| 		"k8s.io/api/core/v1.NodeRuntimeHandler":                                                                 schema_k8sio_api_core_v1_NodeRuntimeHandler(ref), | ||||
| @@ -24418,6 +24419,26 @@ func schema_k8sio_api_core_v1_NodeDaemonEndpoints(ref common.ReferenceCallback) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func schema_k8sio_api_core_v1_NodeFeatures(ref common.ReferenceCallback) common.OpenAPIDefinition { | ||||
| 	return common.OpenAPIDefinition{ | ||||
| 		Schema: spec.Schema{ | ||||
| 			SchemaProps: spec.SchemaProps{ | ||||
| 				Description: "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", | ||||
| 				Type:        []string{"object"}, | ||||
| 				Properties: map[string]spec.Schema{ | ||||
| 					"supplementalGroupsPolicy": { | ||||
| 						SchemaProps: spec.SchemaProps{ | ||||
| 							Description: "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", | ||||
| 							Type:        []string{"boolean"}, | ||||
| 							Format:      "", | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func schema_k8sio_api_core_v1_NodeList(ref common.ReferenceCallback) common.OpenAPIDefinition { | ||||
| 	return common.OpenAPIDefinition{ | ||||
| 		Schema: spec.Schema{ | ||||
| @@ -24536,7 +24557,7 @@ func schema_k8sio_api_core_v1_NodeRuntimeHandlerFeatures(ref common.ReferenceCal | ||||
| 	return common.OpenAPIDefinition{ | ||||
| 		Schema: spec.Schema{ | ||||
| 			SchemaProps: spec.SchemaProps{ | ||||
| 				Description: "NodeRuntimeHandlerFeatures is a set of runtime features.", | ||||
| 				Description: "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", | ||||
| 				Type:        []string{"object"}, | ||||
| 				Properties: map[string]spec.Schema{ | ||||
| 					"recursiveReadOnlyMounts": { | ||||
| @@ -24985,11 +25006,17 @@ func schema_k8sio_api_core_v1_NodeStatus(ref common.ReferenceCallback) common.Op | ||||
| 							}, | ||||
| 						}, | ||||
| 					}, | ||||
| 					"features": { | ||||
| 						SchemaProps: spec.SchemaProps{ | ||||
| 							Description: "Features describes the set of features implemented by the CRI implementation.", | ||||
| 							Ref:         ref("k8s.io/api/core/v1.NodeFeatures"), | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		Dependencies: []string{ | ||||
| 			"k8s.io/api/core/v1.AttachedVolume", "k8s.io/api/core/v1.ContainerImage", "k8s.io/api/core/v1.NodeAddress", "k8s.io/api/core/v1.NodeCondition", "k8s.io/api/core/v1.NodeConfigStatus", "k8s.io/api/core/v1.NodeDaemonEndpoints", "k8s.io/api/core/v1.NodeRuntimeHandler", "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, | ||||
| 			"k8s.io/api/core/v1.AttachedVolume", "k8s.io/api/core/v1.ContainerImage", "k8s.io/api/core/v1.NodeAddress", "k8s.io/api/core/v1.NodeCondition", "k8s.io/api/core/v1.NodeConfigStatus", "k8s.io/api/core/v1.NodeDaemonEndpoints", "k8s.io/api/core/v1.NodeFeatures", "k8s.io/api/core/v1.NodeRuntimeHandler", "k8s.io/api/core/v1.NodeSystemInfo", "k8s.io/apimachinery/pkg/api/resource.Quantity"}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -556,6 +556,8 @@ type RuntimeStatus struct { | ||||
| 	Conditions []RuntimeCondition | ||||
| 	// Handlers is an array of current available handlers | ||||
| 	Handlers []RuntimeHandler | ||||
| 	// Features is the set of features implemented by the runtime | ||||
| 	Features *RuntimeFeatures | ||||
| } | ||||
|  | ||||
| // GetRuntimeCondition gets a specified runtime condition from the runtime status. | ||||
| @@ -579,7 +581,7 @@ func (r *RuntimeStatus) String() string { | ||||
| 	for _, h := range r.Handlers { | ||||
| 		sh = append(sh, h.String()) | ||||
| 	} | ||||
| 	return fmt.Sprintf("Runtime Conditions: %s; Handlers: %s", strings.Join(ss, ", "), strings.Join(sh, ", ")) | ||||
| 	return fmt.Sprintf("Runtime Conditions: %s; Handlers: %s, Features: %s", strings.Join(ss, ", "), strings.Join(sh, ", "), r.Features.String()) | ||||
| } | ||||
|  | ||||
| // RuntimeHandler contains condition information for the runtime handler. | ||||
| @@ -617,6 +619,19 @@ func (c *RuntimeCondition) String() string { | ||||
| 	return fmt.Sprintf("%s=%t reason:%s message:%s", c.Type, c.Status, c.Reason, c.Message) | ||||
| } | ||||
|  | ||||
| // RuntimeFeatures contains the set of features implemented by the runtime | ||||
| type RuntimeFeatures struct { | ||||
| 	SupplementalGroupsPolicy bool | ||||
| } | ||||
|  | ||||
| // String formats the runtime condition into a human readable string. | ||||
| func (f *RuntimeFeatures) String() string { | ||||
| 	if f == nil { | ||||
| 		return "nil" | ||||
| 	} | ||||
| 	return fmt.Sprintf("SupplementalGroupsPolicy: %v", f.SupplementalGroupsPolicy) | ||||
| } | ||||
|  | ||||
| // Pods represents the list of pods | ||||
| type Pods []*Pod | ||||
|  | ||||
|   | ||||
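For illustration, a minimal sketch of how the new Features field surfaces through RuntimeStatus.String(); the import path for the kubecontainer package and the exact output text are assumptions inferred from the format string in the hunk above, not part of this change.

```go
package main

import (
	"fmt"

	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
	// Construct a runtime status with only the new Features field populated.
	status := &kubecontainer.RuntimeStatus{
		Features: &kubecontainer.RuntimeFeatures{SupplementalGroupsPolicy: true},
	}

	// With no conditions or handlers set, this prints roughly:
	//   Runtime Conditions: ; Handlers: , Features: SupplementalGroupsPolicy: true
	fmt.Println(status.String())
}
```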
| @@ -2892,6 +2892,7 @@ func (kl *Kubelet) updateRuntimeUp() { | ||||
|  | ||||
| 	kl.runtimeState.setRuntimeState(nil) | ||||
| 	kl.runtimeState.setRuntimeHandlers(s.Handlers) | ||||
| 	kl.runtimeState.setRuntimeFeatures(s.Features) | ||||
| 	kl.oneTimeInitializer.Do(kl.initializeRuntimeDependentModules) | ||||
| 	kl.runtimeState.setRuntimeSync(kl.clock.Now()) | ||||
| } | ||||
|   | ||||
| @@ -737,6 +737,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(context.Context, *v1.Node) er | ||||
| 		nodestatus.Images(kl.nodeStatusMaxImages, kl.imageManager.GetImageList), | ||||
| 		nodestatus.GoRuntime(), | ||||
| 		nodestatus.RuntimeHandlers(kl.runtimeState.runtimeHandlers), | ||||
| 		nodestatus.NodeFeatures(kl.runtimeState.runtimeFeatures), | ||||
| 	) | ||||
|  | ||||
| 	setters = append(setters, | ||||
|   | ||||
| @@ -208,7 +208,7 @@ func parsePodUIDFromLogsDirectory(name string) types.UID { | ||||
| } | ||||
|  | ||||
| // toKubeRuntimeStatus converts the runtimeapi.RuntimeStatus to kubecontainer.RuntimeStatus. | ||||
| func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus, handlers []*runtimeapi.RuntimeHandler) *kubecontainer.RuntimeStatus { | ||||
| func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus, handlers []*runtimeapi.RuntimeHandler, features *runtimeapi.RuntimeFeatures) *kubecontainer.RuntimeStatus { | ||||
| 	conditions := []kubecontainer.RuntimeCondition{} | ||||
| 	for _, c := range status.GetConditions() { | ||||
| 		conditions = append(conditions, kubecontainer.RuntimeCondition{ | ||||
| @@ -232,7 +232,13 @@ func toKubeRuntimeStatus(status *runtimeapi.RuntimeStatus, handlers []*runtimeap | ||||
| 			SupportsUserNamespaces:          supportsUserns, | ||||
| 		} | ||||
| 	} | ||||
| 	return &kubecontainer.RuntimeStatus{Conditions: conditions, Handlers: retHandlers} | ||||
| 	var retFeatures *kubecontainer.RuntimeFeatures | ||||
| 	if features != nil { | ||||
| 		retFeatures = &kubecontainer.RuntimeFeatures{ | ||||
| 			SupplementalGroupsPolicy: features.SupplementalGroupsPolicy, | ||||
| 		} | ||||
| 	} | ||||
| 	return &kubecontainer.RuntimeStatus{Conditions: conditions, Handlers: retHandlers, Features: retFeatures} | ||||
| } | ||||
|  | ||||
| func fieldSeccompProfile(scmp *v1.SeccompProfile, profileRootPath string, fallbackToRuntimeDefault bool) (*runtimeapi.SecurityProfile, error) { | ||||
|   | ||||
| @@ -347,7 +347,7 @@ func (m *kubeGenericRuntimeManager) Status(ctx context.Context) (*kubecontainer. | ||||
| 	if resp.GetStatus() == nil { | ||||
| 		return nil, errors.New("runtime status is nil") | ||||
| 	} | ||||
| 	return toKubeRuntimeStatus(resp.GetStatus(), resp.GetRuntimeHandlers()), nil | ||||
| 	return toKubeRuntimeStatus(resp.GetStatus(), resp.GetRuntimeHandlers(), resp.GetFeatures()), nil | ||||
| } | ||||
|  | ||||
| // GetPods returns a list of containers grouped by pods. The boolean parameter | ||||
|   | ||||
| @@ -482,6 +482,23 @@ func GoRuntime() Setter { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NodeFeatures returns a Setter that sets NodeFeatures on the node. | ||||
| func NodeFeatures(featuresGetter func() *kubecontainer.RuntimeFeatures) Setter { | ||||
| 	return func(ctx context.Context, node *v1.Node) error { | ||||
| 		if !utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) { | ||||
| 			return nil | ||||
| 		} | ||||
| 		features := featuresGetter() | ||||
| 		if features == nil { | ||||
| 			return nil | ||||
| 		} | ||||
| 		node.Status.Features = &v1.NodeFeatures{ | ||||
| 			SupplementalGroupsPolicy: &features.SupplementalGroupsPolicy, | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RuntimeHandlers returns a Setter that sets RuntimeHandlers on the node. | ||||
| func RuntimeHandlers(fn func() []kubecontainer.RuntimeHandler) Setter { | ||||
| 	return func(ctx context.Context, node *v1.Node) error { | ||||
|   | ||||
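For illustration, a hedged sketch that exercises the new NodeFeatures setter with a stub getter standing in for runtimeState.runtimeFeatures; the import aliases are assumptions based on the in-tree layout, and the setter leaves the node untouched unless the SupplementalGroupsPolicy feature gate is enabled.

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
)

func main() {
	// Stub getter standing in for runtimeState.runtimeFeatures.
	getFeatures := func() *kubecontainer.RuntimeFeatures {
		return &kubecontainer.RuntimeFeatures{SupplementalGroupsPolicy: true}
	}

	setter := nodestatus.NodeFeatures(getFeatures)

	node := &v1.Node{}
	if err := setter(context.Background(), node); err != nil {
		panic(err)
	}

	// With the SupplementalGroupsPolicy feature gate enabled, the setter
	// mirrors the runtime capability into node.Status.Features.
	if f := node.Status.Features; f != nil && f.SupplementalGroupsPolicy != nil {
		fmt.Println("supplementalGroupsPolicy:", *f.SupplementalGroupsPolicy)
	}
}
```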
| @@ -36,6 +36,7 @@ type runtimeState struct { | ||||
| 	cidr                     string | ||||
| 	healthChecks             []*healthCheck | ||||
| 	rtHandlers               []kubecontainer.RuntimeHandler | ||||
| 	rtFeatures               *kubecontainer.RuntimeFeatures | ||||
| } | ||||
|  | ||||
| // A health check function should be efficient and not rely on external | ||||
| @@ -83,6 +84,18 @@ func (s *runtimeState) runtimeHandlers() []kubecontainer.RuntimeHandler { | ||||
| 	return s.rtHandlers | ||||
| } | ||||
|  | ||||
| func (s *runtimeState) setRuntimeFeatures(features *kubecontainer.RuntimeFeatures) { | ||||
| 	s.Lock() | ||||
| 	defer s.Unlock() | ||||
| 	s.rtFeatures = features | ||||
| } | ||||
|  | ||||
| func (s *runtimeState) runtimeFeatures() *kubecontainer.RuntimeFeatures { | ||||
| 	s.RLock() | ||||
| 	defer s.RUnlock() | ||||
| 	return s.rtFeatures | ||||
| } | ||||
|  | ||||
| func (s *runtimeState) setStorageState(err error) { | ||||
| 	s.Lock() | ||||
| 	defer s.Unlock() | ||||
|   | ||||
| @@ -106,6 +106,10 @@ func dropDisabledFields(node *api.Node, oldNode *api.Node) { | ||||
| 	if !utilfeature.DefaultFeatureGate.Enabled(features.RecursiveReadOnlyMounts) && !utilfeature.DefaultFeatureGate.Enabled(features.UserNamespacesSupport) { | ||||
| 		node.Status.RuntimeHandlers = nil | ||||
| 	} | ||||
|  | ||||
| 	if !utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) && !supplementalGroupsPolicyInUse(oldNode) { | ||||
| 		node.Status.Features = nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // nodeConfigSourceInUse returns true if node's Spec ConfigSource is set(used) | ||||
| @@ -295,3 +299,11 @@ func fieldIsDeprecatedWarnings(obj runtime.Object) []string { | ||||
| 	} | ||||
| 	return warnings | ||||
| } | ||||
|  | ||||
| // supplementalGroupsPolicyInUse returns true if the node.status has NodeFeature | ||||
| func supplementalGroupsPolicyInUse(node *api.Node) bool { | ||||
| 	if node == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return node.Status.Features != nil | ||||
| } | ||||
|   | ||||
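For illustration, a hedged in-package sketch of the drop semantics added above (dropDisabledFields is unexported, so this would live alongside the node registry strategy or its tests); the ptr helper from k8s.io/utils/ptr and the surrounding test scaffolding are assumptions.

```go
// In-package sketch: with the SupplementalGroupsPolicy gate disabled and no
// prior use of the field, an update cannot introduce status.features.
oldNode := &api.Node{} // Features never set on the existing object
newNode := &api.Node{
	Status: api.NodeStatus{
		Features: &api.NodeFeatures{SupplementalGroupsPolicy: ptr.To(true)},
	},
}

dropDisabledFields(newNode, oldNode)
// newNode.Status.Features is cleared when the gate is off; it is preserved
// when the gate is on, or when oldNode already had status.features set
// (supplementalGroupsPolicyInUse returns true).
```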
staging/src/k8s.io/api/core/v1/generated.pb.go (generated, 2440 lines changed; file diff suppressed because it is too large)
							| @@ -2598,6 +2598,15 @@ message NodeDaemonEndpoints { | ||||
|   optional DaemonEndpoint kubeletEndpoint = 1; | ||||
| } | ||||
|  | ||||
| // NodeFeatures describes the set of features implemented by the CRI implementation. | ||||
| // The features contained in the NodeFeatures should depend only on the cri implementation | ||||
| // independent of runtime handlers. | ||||
| message NodeFeatures { | ||||
|   // SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. | ||||
|   // +optional | ||||
|   optional bool supplementalGroupsPolicy = 1; | ||||
| } | ||||
|  | ||||
| // NodeList is the whole list of all Nodes which have been registered with master. | ||||
| message NodeList { | ||||
|   // Standard list metadata. | ||||
| @@ -2628,7 +2637,7 @@ message NodeRuntimeHandler { | ||||
|   optional NodeRuntimeHandlerFeatures features = 2; | ||||
| } | ||||
|  | ||||
| // NodeRuntimeHandlerFeatures is a set of runtime features. | ||||
| // NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. | ||||
| message NodeRuntimeHandlerFeatures { | ||||
|   // RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. | ||||
|   // +featureGate=RecursiveReadOnlyMounts | ||||
| @@ -2803,6 +2812,11 @@ message NodeStatus { | ||||
|   // +optional | ||||
|   // +listType=atomic | ||||
|   repeated NodeRuntimeHandler runtimeHandlers = 12; | ||||
|  | ||||
|   // Features describes the set of features implemented by the CRI implementation. | ||||
|   // +featureGate=SupplementalGroupsPolicy | ||||
|   // +optional | ||||
|   optional NodeFeatures features = 13; | ||||
| } | ||||
|  | ||||
| // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. | ||||
|   | ||||
| @@ -5871,7 +5871,7 @@ type NodeDaemonEndpoints struct { | ||||
| 	KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` | ||||
| } | ||||
|  | ||||
| // NodeRuntimeHandlerFeatures is a set of runtime features. | ||||
| // NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler. | ||||
| type NodeRuntimeHandlerFeatures struct { | ||||
| 	// RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts. | ||||
| 	// +featureGate=RecursiveReadOnlyMounts | ||||
| @@ -5894,6 +5894,15 @@ type NodeRuntimeHandler struct { | ||||
| 	Features *NodeRuntimeHandlerFeatures `json:"features,omitempty" protobuf:"bytes,2,opt,name=features"` | ||||
| } | ||||
|  | ||||
| // NodeFeatures describes the set of features implemented by the CRI implementation. | ||||
| // The features contained in the NodeFeatures should depend only on the cri implementation | ||||
| // independent of runtime handlers. | ||||
| type NodeFeatures struct { | ||||
| 	// SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. | ||||
| 	// +optional | ||||
| 	SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty" protobuf:"varint,1,opt,name=supplementalGroupsPolicy"` | ||||
| } | ||||
|  | ||||
| // NodeSystemInfo is a set of ids/uuids to uniquely identify the node. | ||||
| type NodeSystemInfo struct { | ||||
| 	// MachineID reported by the node. For unique machine identification | ||||
| @@ -6036,6 +6045,10 @@ type NodeStatus struct { | ||||
| 	// +optional | ||||
| 	// +listType=atomic | ||||
| 	RuntimeHandlers []NodeRuntimeHandler `json:"runtimeHandlers,omitempty" protobuf:"bytes,12,rep,name=runtimeHandlers"` | ||||
| 	// Features describes the set of features implemented by the CRI implementation. | ||||
| 	// +featureGate=SupplementalGroupsPolicy | ||||
| 	// +optional | ||||
| 	Features *NodeFeatures `json:"features,omitempty" protobuf:"bytes,13,rep,name=features"` | ||||
| } | ||||
|  | ||||
| type UniqueVolumeName string | ||||
|   | ||||
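For illustration, a hedged client-side sketch of reading the new field with client-go; the kubeconfig loading, node name, and error handling are placeholders and not part of this change.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig loading; adjust to your environment.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	node, err := clientset.CoreV1().Nodes().Get(context.TODO(), "node-1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// status.features is only populated when the kubelet runs with the
	// SupplementalGroupsPolicy feature gate and the runtime reports support.
	if f := node.Status.Features; f != nil && f.SupplementalGroupsPolicy != nil && *f.SupplementalGroupsPolicy {
		fmt.Println("runtime supports SupplementalGroupsPolicy and ContainerUser")
	} else {
		fmt.Println("feature not reported by this node")
	}
}
```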
| @@ -1206,6 +1206,15 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { | ||||
| 	return map_NodeDaemonEndpoints | ||||
| } | ||||
|  | ||||
| var map_NodeFeatures = map[string]string{ | ||||
| 	"":                         "NodeFeatures describes the set of features implemented by the CRI implementation. The features contained in the NodeFeatures should depend only on the cri implementation independent of runtime handlers.", | ||||
| 	"supplementalGroupsPolicy": "SupplementalGroupsPolicy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser.", | ||||
| } | ||||
|  | ||||
| func (NodeFeatures) SwaggerDoc() map[string]string { | ||||
| 	return map_NodeFeatures | ||||
| } | ||||
|  | ||||
| var map_NodeList = map[string]string{ | ||||
| 	"":         "NodeList is the whole list of all Nodes which have been registered with master.", | ||||
| 	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", | ||||
| @@ -1236,7 +1245,7 @@ func (NodeRuntimeHandler) SwaggerDoc() map[string]string { | ||||
| } | ||||
|  | ||||
| var map_NodeRuntimeHandlerFeatures = map[string]string{ | ||||
| 	"":                        "NodeRuntimeHandlerFeatures is a set of runtime features.", | ||||
| 	"":                        "NodeRuntimeHandlerFeatures is a set of features implemented by the runtime handler.", | ||||
| 	"recursiveReadOnlyMounts": "RecursiveReadOnlyMounts is set to true if the runtime handler supports RecursiveReadOnlyMounts.", | ||||
| 	"userNamespaces":          "UserNamespaces is set to true if the runtime handler supports UserNamespaces, including for volumes.", | ||||
| } | ||||
| @@ -1304,6 +1313,7 @@ var map_NodeStatus = map[string]string{ | ||||
| 	"volumesAttached": "List of volumes that are attached to the node.", | ||||
| 	"config":          "Status of the config assigned to the node via the dynamic Kubelet config feature.", | ||||
| 	"runtimeHandlers": "The available runtime handlers.", | ||||
| 	"features":        "Features describes the set of features implemented by the CRI implementation.", | ||||
| } | ||||
|  | ||||
| func (NodeStatus) SwaggerDoc() map[string]string { | ||||
|   | ||||
| @@ -2716,6 +2716,27 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | ||||
| func (in *NodeFeatures) DeepCopyInto(out *NodeFeatures) { | ||||
| 	*out = *in | ||||
| 	if in.SupplementalGroupsPolicy != nil { | ||||
| 		in, out := &in.SupplementalGroupsPolicy, &out.SupplementalGroupsPolicy | ||||
| 		*out = new(bool) | ||||
| 		**out = **in | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeFeatures. | ||||
| func (in *NodeFeatures) DeepCopy() *NodeFeatures { | ||||
| 	if in == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	out := new(NodeFeatures) | ||||
| 	in.DeepCopyInto(out) | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. | ||||
| func (in *NodeList) DeepCopyInto(out *NodeList) { | ||||
| 	*out = *in | ||||
| @@ -2988,6 +3009,11 @@ func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { | ||||
| 			(*in)[i].DeepCopyInto(&(*out)[i]) | ||||
| 		} | ||||
| 	} | ||||
| 	if in.Features != nil { | ||||
| 		in, out := &in.Features, &out.Features | ||||
| 		*out = new(NodeFeatures) | ||||
| 		(*in).DeepCopyInto(*out) | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -165,6 +165,9 @@ | ||||
|           "userNamespaces": true | ||||
|         } | ||||
|       } | ||||
|     ] | ||||
|     ], | ||||
|     "features": { | ||||
|       "supplementalGroupsPolicy": true | ||||
|     } | ||||
|   } | ||||
| } | ||||
staging/src/k8s.io/api/testdata/HEAD/core.v1.Node.pb (vendored binary file; diff not shown)
							| @@ -92,6 +92,8 @@ status: | ||||
|   daemonEndpoints: | ||||
|     kubeletEndpoint: | ||||
|       Port: 1 | ||||
|   features: | ||||
|     supplementalGroupsPolicy: true | ||||
|   images: | ||||
|   - names: | ||||
|     - namesValue | ||||
|   | ||||
| @@ -0,0 +1,39 @@ | ||||
| /* | ||||
| Copyright The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| // Code generated by applyconfiguration-gen. DO NOT EDIT. | ||||
|  | ||||
| package v1 | ||||
|  | ||||
| // NodeFeaturesApplyConfiguration represents a declarative configuration of the NodeFeatures type for use | ||||
| // with apply. | ||||
| type NodeFeaturesApplyConfiguration struct { | ||||
| 	SupplementalGroupsPolicy *bool `json:"supplementalGroupsPolicy,omitempty"` | ||||
| } | ||||
|  | ||||
| // NodeFeaturesApplyConfiguration constructs a declarative configuration of the NodeFeatures type for use with | ||||
| // apply. | ||||
| func NodeFeatures() *NodeFeaturesApplyConfiguration { | ||||
| 	return &NodeFeaturesApplyConfiguration{} | ||||
| } | ||||
|  | ||||
| // WithSupplementalGroupsPolicy sets the SupplementalGroupsPolicy field in the declarative configuration to the given value | ||||
| // and returns the receiver, so that objects can be built by chaining "With" function invocations. | ||||
| // If called multiple times, the SupplementalGroupsPolicy field is set to the value of the last call. | ||||
| func (b *NodeFeaturesApplyConfiguration) WithSupplementalGroupsPolicy(value bool) *NodeFeaturesApplyConfiguration { | ||||
| 	b.SupplementalGroupsPolicy = &value | ||||
| 	return b | ||||
| } | ||||
| @@ -37,6 +37,7 @@ type NodeStatusApplyConfiguration struct { | ||||
| 	VolumesAttached []AttachedVolumeApplyConfiguration     `json:"volumesAttached,omitempty"` | ||||
| 	Config          *NodeConfigStatusApplyConfiguration    `json:"config,omitempty"` | ||||
| 	RuntimeHandlers []NodeRuntimeHandlerApplyConfiguration `json:"runtimeHandlers,omitempty"` | ||||
| 	Features        *NodeFeaturesApplyConfiguration        `json:"features,omitempty"` | ||||
| } | ||||
|  | ||||
| // NodeStatusApplyConfiguration constructs a declarative configuration of the NodeStatus type for use with | ||||
| @@ -167,3 +168,11 @@ func (b *NodeStatusApplyConfiguration) WithRuntimeHandlers(values ...*NodeRuntim | ||||
| 	} | ||||
| 	return b | ||||
| } | ||||
|  | ||||
| // WithFeatures sets the Features field in the declarative configuration to the given value | ||||
| // and returns the receiver, so that objects can be built by chaining "With" function invocations. | ||||
| // If called multiple times, the Features field is set to the value of the last call. | ||||
| func (b *NodeStatusApplyConfiguration) WithFeatures(value *NodeFeaturesApplyConfiguration) *NodeStatusApplyConfiguration { | ||||
| 	b.Features = value | ||||
| 	return b | ||||
| } | ||||
|   | ||||
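For illustration, a hedged sketch of setting the new field through server-side apply using the generated apply configurations; the helper name, field manager, and clientset parameter are assumptions, not part of this change.

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// applyNodeFeatures is an illustrative helper (not part of this PR) that
// records the runtime capability via server-side apply of the node status
// subresource, using the builders generated above.
func applyNodeFeatures(ctx context.Context, clientset kubernetes.Interface, nodeName string, supported bool) error {
	nodeApply := corev1apply.Node(nodeName).
		WithStatus(corev1apply.NodeStatus().
			WithFeatures(corev1apply.NodeFeatures().
				WithSupplementalGroupsPolicy(supported)))

	_, err := clientset.CoreV1().Nodes().ApplyStatus(ctx, nodeApply,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}
```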
| @@ -6103,6 +6103,12 @@ var schemaYAML = typed.YAMLObject(`types: | ||||
|       type: | ||||
|         namedType: io.k8s.api.core.v1.DaemonEndpoint | ||||
|       default: {} | ||||
| - name: io.k8s.api.core.v1.NodeFeatures | ||||
|   map: | ||||
|     fields: | ||||
|     - name: supplementalGroupsPolicy | ||||
|       type: | ||||
|         scalar: boolean | ||||
| - name: io.k8s.api.core.v1.NodeRuntimeHandler | ||||
|   map: | ||||
|     fields: | ||||
| @@ -6231,6 +6237,9 @@ var schemaYAML = typed.YAMLObject(`types: | ||||
|       type: | ||||
|         namedType: io.k8s.api.core.v1.NodeDaemonEndpoints | ||||
|       default: {} | ||||
|     - name: features | ||||
|       type: | ||||
|         namedType: io.k8s.api.core.v1.NodeFeatures | ||||
|     - name: images | ||||
|       type: | ||||
|         list: | ||||
|   | ||||
| @@ -806,6 +806,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} { | ||||
| 		return &applyconfigurationscorev1.NodeConfigStatusApplyConfiguration{} | ||||
| 	case corev1.SchemeGroupVersion.WithKind("NodeDaemonEndpoints"): | ||||
| 		return &applyconfigurationscorev1.NodeDaemonEndpointsApplyConfiguration{} | ||||
| 	case corev1.SchemeGroupVersion.WithKind("NodeFeatures"): | ||||
| 		return &applyconfigurationscorev1.NodeFeaturesApplyConfiguration{} | ||||
| 	case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandler"): | ||||
| 		return &applyconfigurationscorev1.NodeRuntimeHandlerApplyConfiguration{} | ||||
| 	case corev1.SchemeGroupVersion.WithKind("NodeRuntimeHandlerFeatures"): | ||||
|   | ||||
(file diff suppressed because it is too large)
							| @@ -1600,6 +1600,7 @@ message StatusRequest { | ||||
|     bool verbose = 1; | ||||
| } | ||||
|  | ||||
| // RuntimeHandlerFeatures is a set of features implemented by the runtime handler. | ||||
| message RuntimeHandlerFeatures { | ||||
|     // recursive_read_only_mounts is set to true if the runtime handler supports | ||||
|     // recursive read-only mounts. | ||||
| @@ -1620,6 +1621,14 @@ message RuntimeHandler { | ||||
|     RuntimeHandlerFeatures features = 2; | ||||
| } | ||||
|  | ||||
| // RuntimeFeatures describes the set of features implemented by the CRI implementation. | ||||
| // The features contained in the RuntimeFeatures should depend only on the cri implementation | ||||
| // independent of runtime handlers. | ||||
| message RuntimeFeatures { | ||||
|     // supplemental_groups_policy is set to true if the runtime supports SupplementalGroupsPolicy and ContainerUser. | ||||
|     bool supplemental_groups_policy = 1; | ||||
| } | ||||
|  | ||||
| message StatusResponse { | ||||
|     // Status of the Runtime. | ||||
|     RuntimeStatus status = 1; | ||||
| @@ -1630,6 +1639,9 @@ message StatusResponse { | ||||
|     map<string, string> info = 2; | ||||
|     // Runtime handlers. | ||||
|     repeated RuntimeHandler runtime_handlers = 3; | ||||
|     // features describes the set of features implemented by the CRI implementation. | ||||
|     // This field is supposed to propagate to NodeFeatures in Kubernetes API. | ||||
|     RuntimeFeatures features = 4; | ||||
| } | ||||
|  | ||||
| message ImageFsInfoRequest {} | ||||
|   | ||||
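For illustration, a hedged sketch of how a CRI implementation could advertise this capability in its Status RPC; the runtimeService receiver and the conditions shown are assumptions, and only the Features field is new here.

```go
// runtimeapi stands for k8s.io/cri-api/pkg/apis/runtime/v1 (generated from api.proto).
// The runtimeService receiver is a hypothetical CRI server type.
func (s *runtimeService) Status(ctx context.Context, req *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
	return &runtimeapi.StatusResponse{
		Status: &runtimeapi.RuntimeStatus{
			Conditions: []*runtimeapi.RuntimeCondition{
				{Type: runtimeapi.RuntimeReady, Status: true},
				{Type: runtimeapi.NetworkReady, Status: true},
			},
		},
		// New in this PR: advertise fine-grained SupplementalGroups support so the
		// kubelet can propagate it to node.status.features.supplementalGroupsPolicy.
		Features: &runtimeapi.RuntimeFeatures{
			SupplementalGroupsPolicy: true,
		},
	}, nil
}
```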
| @@ -334,6 +334,11 @@ var ( | ||||
| 	// TODO: document the feature (owning SIG, when to use this feature for a test) | ||||
| 	StorageVersionAPI = framework.WithFeature(framework.ValidFeatures.Add("StorageVersionAPI")) | ||||
|  | ||||
| 	// Owner: sig-node | ||||
| 	// Marks tests that require a cluster with SupplementalGroupsPolicy | ||||
| 	// (used for testing fine-grained SupplementalGroups control <https://kep.k8s.io/3619>) | ||||
| 	SupplementalGroupsPolicy = framework.WithFeature(framework.ValidFeatures.Add("SupplementalGroupsPolicy")) | ||||
|  | ||||
| 	// Owner: sig-network | ||||
| 	// Marks tests that require a cluster with Topology Hints enabled. | ||||
| 	TopologyHints = framework.WithFeature(framework.ValidFeatures.Add("Topology Hints")) | ||||
|   | ||||
| @@ -25,19 +25,25 @@ package node | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"time" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/uuid" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| 	ptr "k8s.io/utils/ptr" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	"github.com/onsi/gomega/gcustom" | ||||
| ) | ||||
|  | ||||
| // SeccompProcStatusField is the field of /proc/$PID/status referencing the seccomp filter type. | ||||
| @@ -114,6 +120,167 @@ var _ = SIGDescribe("Security Context", func() { | ||||
| 		}) | ||||
| 	}) | ||||
|  | ||||
| 	SIGDescribe("SupplementalGroupsPolicy", feature.SupplementalGroupsPolicy, func() { | ||||
| 		timeout := 3 * time.Minute | ||||
|  | ||||
| 		agnhostImage := imageutils.GetE2EImage(imageutils.Agnhost) | ||||
| 		uidInImage := int64(1000) | ||||
| 		gidDefinedInImage := int64(50000) | ||||
| 		supplementalGroup := int64(60000) | ||||
|  | ||||
| 		supportsSupplementalGroupsPolicy := func(ctx context.Context, f *framework.Framework, nodeName string) bool { | ||||
| 			node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			gomega.Expect(node).NotTo(gomega.BeNil()) | ||||
| 			if node.Status.Features != nil { | ||||
| 				supportsSupplementalGroupsPolicy := node.Status.Features.SupplementalGroupsPolicy | ||||
| 				if supportsSupplementalGroupsPolicy != nil && *supportsSupplementalGroupsPolicy { | ||||
| 					return true | ||||
| 				} | ||||
| 			} | ||||
| 			return false | ||||
| 		} | ||||
| 		mkPod := func(policy *v1.SupplementalGroupsPolicy) *v1.Pod { | ||||
| 			pod := scTestPod(false, false) | ||||
|  | ||||
| 			// In specified image(agnhost E2E image), | ||||
| 			// - user-defined-in-image(uid=1000) is defined | ||||
| 			// - user-defined-in-image belongs to group-defined-in-image(gid=50000) | ||||
| 			// thus, resultant supplementary group of the container processes should be | ||||
| 			// - 1000 : self | ||||
| 			// - 50000: pre-defined groups defined in the container image(/etc/group) of self(uid=1000) | ||||
| 			// - 60000: specified in SupplementalGroups | ||||
| 			// $ id -G | ||||
| 			// 1000 50000 60000 (if SupplementalGroupsPolicy=Merge or not set) | ||||
| 			// 1000 60000       (if SupplementalGroupsPolicy=Strict) | ||||
| 			pod.Spec.SecurityContext.RunAsUser = &uidInImage | ||||
| 			pod.Spec.SecurityContext.SupplementalGroupsPolicy = policy | ||||
| 			pod.Spec.SecurityContext.SupplementalGroups = []int64{supplementalGroup} | ||||
| 			pod.Spec.Containers[0].Image = agnhostImage | ||||
| 			pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -G; while :; do sleep 1; done"} | ||||
|  | ||||
| 			return pod | ||||
| 		} | ||||
| 		waitForContainerUser := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedContainerUser *v1.ContainerUser) error { | ||||
| 			return framework.Gomega().Eventually(ctx, | ||||
| 				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))). | ||||
| 				WithTimeout(timeout). | ||||
| 				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) { | ||||
| 					for _, s := range p.Status.ContainerStatuses { | ||||
| 						if s.Name == containerName { | ||||
| 							return reflect.DeepEqual(s.User, expectedContainerUser), nil | ||||
| 						} | ||||
| 					} | ||||
| 					return false, nil | ||||
| 				})) | ||||
| 		} | ||||
| 		waitForPodLogs := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedLog string) error { | ||||
| 			return framework.Gomega().Eventually(ctx, | ||||
| 				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))). | ||||
| 				WithTimeout(timeout). | ||||
| 				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) { | ||||
| 					podLogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, p.Namespace, p.Name, containerName) | ||||
| 					if err != nil { | ||||
| 						return false, err | ||||
| 					} | ||||
| 					return podLogs == expectedLog, nil | ||||
| 				})) | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.When("SupplementalGroupsPolicy was not set", func() { | ||||
| 			ginkgo.It("if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) { | ||||
| 				var pod *v1.Pod | ||||
| 				ginkgo.By("creating a pod", func() { | ||||
| 					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(nil)) | ||||
| 					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name)) | ||||
| 					var err error | ||||
| 					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 					framework.ExpectNoError(err) | ||||
| 					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) { | ||||
| 						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy") | ||||
| 					} | ||||
| 					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) | ||||
| 				}) | ||||
| 				expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup) | ||||
| 				expectedContainerUser := &v1.ContainerUser{ | ||||
| 					Linux: &v1.LinuxContainerUser{ | ||||
| 						UID:                uidInImage, | ||||
| 						GID:                uidInImage, | ||||
| 						SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup}, | ||||
| 					}, | ||||
| 				} | ||||
|  | ||||
| 				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser)) | ||||
| 				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n")) | ||||
|  | ||||
| 				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G") | ||||
| 				gomega.Expect(stdout).To(gomega.Equal(expectedOutput)) | ||||
| 			}) | ||||
| 		}) | ||||
| 		ginkgo.When("SupplementalGroupsPolicy was set to Merge", func() { | ||||
| 			ginkgo.It("if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) { | ||||
| 				var pod *v1.Pod | ||||
| 				ginkgo.By("creating a pod", func() { | ||||
| 					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyMerge))) | ||||
| 					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name)) | ||||
| 					var err error | ||||
| 					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 					framework.ExpectNoError(err) | ||||
| 					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) { | ||||
| 						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy") | ||||
| 					} | ||||
| 					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) | ||||
| 				}) | ||||
|  | ||||
| 				expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup) | ||||
| 				expectedContainerUser := &v1.ContainerUser{ | ||||
| 					Linux: &v1.LinuxContainerUser{ | ||||
| 						UID:                uidInImage, | ||||
| 						GID:                uidInImage, | ||||
| 						SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup}, | ||||
| 					}, | ||||
| 				} | ||||
|  | ||||
| 				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser)) | ||||
| 				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n")) | ||||
|  | ||||
| 				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G") | ||||
| 				gomega.Expect(stdout).To(gomega.Equal(expectedOutput)) | ||||
| 			}) | ||||
| 		}) | ||||
| 		ginkgo.When("SupplementalGroupsPolicy was set to Strict", func() { | ||||
| 			ginkgo.It("even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) { | ||||
| 				var pod *v1.Pod | ||||
| 				ginkgo.By("creating a pod", func() { | ||||
| 					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyStrict))) | ||||
| 					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name)) | ||||
| 					var err error | ||||
| 					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 					framework.ExpectNoError(err) | ||||
| 					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) { | ||||
| 						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy") | ||||
| 					} | ||||
| 					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod)) | ||||
| 				}) | ||||
|  | ||||
| 				expectedOutput := fmt.Sprintf("%d %d", uidInImage, supplementalGroup) | ||||
| 				expectedContainerUser := &v1.ContainerUser{ | ||||
| 					Linux: &v1.LinuxContainerUser{ | ||||
| 						UID:                uidInImage, | ||||
| 						GID:                uidInImage, | ||||
| 						SupplementalGroups: []int64{uidInImage, supplementalGroup}, | ||||
| 					}, | ||||
| 				} | ||||
|  | ||||
| 				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser)) | ||||
| 				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n")) | ||||
|  | ||||
| 				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G") | ||||
| 				gomega.Expect(stdout).To(gomega.Equal(expectedOutput)) | ||||
| 			}) | ||||
| 		}) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) { | ||||
| 		pod := scTestPod(false, false) | ||||
| 		userID := int64(1001) | ||||
|   | ||||