
Automatic merge from submit-queue

WantsAuthorizer admission plugin support

The next step for PSP admission is to limit which PSPs may be used based on user information. To do this, the admission plugin needs to make authorization checks for the `user.Info` in the request. This change allows a plugin to request the injection of an authorizer so it can perform those checks itself. Note: this could be done with a SAR, but since admission runs inside the API server, using a SAR would incur an extra hop compared to calling the authorizer directly. @deads2k @derekwaynecarr
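For illustration, here is a minimal, self-contained sketch of the injection pattern this change enables. It assumes an interface named `WantsAuthorizer` with a `SetAuthorizer` method and uses simplified stand-in types rather than the real Kubernetes interfaces; in the file below the actual wiring happens through `admission.NewPluginInitializer(sharedInformers, apiAuthorizer)`.

```go
package main

import "fmt"

// Authorizer is a stand-in for the API server's authorizer interface.
type Authorizer interface {
	Authorize(user, verb, resource string) (allowed bool, reason string)
}

// Interface is a stand-in for an admission plugin.
type Interface interface {
	Admit(resource string) error
}

// WantsAuthorizer is implemented by admission plugins that need to make
// authorization checks for the user.Info in the request.
type WantsAuthorizer interface {
	SetAuthorizer(Authorizer)
}

// PluginInitializer injects shared dependencies into plugins that ask for them.
type PluginInitializer struct {
	authz Authorizer
}

// Initialize hands the authorizer to any plugin that declares it wants one.
func (pi PluginInitializer) Initialize(plugins []Interface) {
	for _, p := range plugins {
		if wants, ok := p.(WantsAuthorizer); ok {
			wants.SetAuthorizer(pi.authz)
		}
	}
}

// pspPlugin sketches a PSP-style plugin that limits what a user may use.
type pspPlugin struct {
	authz Authorizer
}

func (p *pspPlugin) SetAuthorizer(a Authorizer) { p.authz = a }

func (p *pspPlugin) Admit(resource string) error {
	// With the injected authorizer the check happens in-process, avoiding the
	// extra hop a SubjectAccessReview call back to the API server would add.
	if allowed, reason := p.authz.Authorize("alice", "use", resource); !allowed {
		return fmt.Errorf("denied: %s", reason)
	}
	return nil
}

// allowAll is a trivial authorizer used only to make the example runnable.
type allowAll struct{}

func (allowAll) Authorize(user, verb, resource string) (bool, string) { return true, "" }

func main() {
	plugin := &pspPlugin{}
	PluginInitializer{authz: allowAll{}}.Initialize([]Interface{plugin})
	fmt.Println(plugin.Admit("restricted-psp")) // prints <nil> with the allow-all authorizer
}
```

Because the authorizer is only handed to plugins that ask for it, plugins that need no authorization checks are unaffected.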
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app

import (
	"crypto/tls"
	"net"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/golang/glog"
	"github.com/pborman/uuid"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"

	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
	"k8s.io/kubernetes/pkg/admission"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/apis/rbac"
	"k8s.io/kubernetes/pkg/apiserver"
	"k8s.io/kubernetes/pkg/apiserver/authenticator"
	"k8s.io/kubernetes/pkg/apiserver/openapi"
	authorizerunion "k8s.io/kubernetes/pkg/auth/authorizer/union"
	"k8s.io/kubernetes/pkg/auth/user"
	"k8s.io/kubernetes/pkg/capabilities"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller/informers"
	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
	generatedopenapi "k8s.io/kubernetes/pkg/generated/openapi"
	"k8s.io/kubernetes/pkg/genericapiserver"
	"k8s.io/kubernetes/pkg/genericapiserver/authorizer"
	genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options"
	genericvalidation "k8s.io/kubernetes/pkg/genericapiserver/validation"
	kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
	"k8s.io/kubernetes/pkg/master"
	"k8s.io/kubernetes/pkg/registry/cachesize"
	"k8s.io/kubernetes/pkg/registry/generic"
	"k8s.io/kubernetes/pkg/registry/rbac/clusterrole"
	clusterroleetcd "k8s.io/kubernetes/pkg/registry/rbac/clusterrole/etcd"
	"k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding"
	clusterrolebindingetcd "k8s.io/kubernetes/pkg/registry/rbac/clusterrolebinding/etcd"
	"k8s.io/kubernetes/pkg/registry/rbac/role"
	roleetcd "k8s.io/kubernetes/pkg/registry/rbac/role/etcd"
	"k8s.io/kubernetes/pkg/registry/rbac/rolebinding"
	rolebindingetcd "k8s.io/kubernetes/pkg/registry/rbac/rolebinding/etcd"
	"k8s.io/kubernetes/pkg/serviceaccount"
	"k8s.io/kubernetes/pkg/util/wait"
	authenticatorunion "k8s.io/kubernetes/plugin/pkg/auth/authenticator/request/union"
)

// NewAPIServerCommand creates a *cobra.Command object with default parameters
func NewAPIServerCommand() *cobra.Command {
	s := options.NewAPIServer()
	s.AddFlags(pflag.CommandLine)
	cmd := &cobra.Command{
		Use: "kube-apiserver",
		Long: `The Kubernetes API server validates and configures data
for the api objects which include pods, services, replicationcontrollers, and
others. The API Server services REST operations and provides the frontend to the
cluster's shared state through which all other components interact.`,
		Run: func(cmd *cobra.Command, args []string) {
		},
	}

	return cmd
}

// Run runs the specified APIServer. This should never exit.
func Run(s *options.APIServer) error {
	genericvalidation.VerifyEtcdServersList(s.ServerRunOptions)
	genericapiserver.DefaultAndValidateRunOptions(s.ServerRunOptions)

	capabilities.Initialize(capabilities.Capabilities{
		AllowPrivileged: s.AllowPrivileged,
		// TODO(vmarmol): Implement support for HostNetworkSources.
		PrivilegedSources: capabilities.PrivilegedSources{
			HostNetworkSources: []string{},
			HostPIDSources:     []string{},
			HostIPCSources:     []string{},
		},
		PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
	})

	// Setup tunneler if needed
	var tunneler genericapiserver.Tunneler
	var proxyDialerFn apiserver.ProxyDialerFunc
	if len(s.SSHUser) > 0 {
		// Get ssh key distribution func, if supported
		var installSSH genericapiserver.InstallSSHKey
		cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
		if err != nil {
			glog.Fatalf("Cloud provider could not be initialized: %v", err)
		}
		if cloud != nil {
			if instances, supported := cloud.Instances(); supported {
				installSSH = instances.AddSSHKeyToAllInstances
			}
		}
		if s.KubeletConfig.Port == 0 {
			glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.")
		}
		// Set up the tunneler
		// TODO(cjcullen): If we want this to handle per-kubelet ports or other
		// kubelet listen-addresses, we need to plumb through options.
		healthCheckPath := &url.URL{
			Scheme: "https",
			Host:   net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)),
			Path:   "healthz",
		}
		tunneler = genericapiserver.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH)

		// Use the tunneler's dialer to connect to the kubelet
		s.KubeletConfig.Dial = tunneler.Dial
		// Use the tunneler's dialer when proxying to pods, services, and nodes
		proxyDialerFn = tunneler.Dial
	}

	// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
	proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}

	kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig)
	if err != nil {
		glog.Fatalf("Failed to start kubelet client: %v", err)
	}

	if s.StorageConfig.DeserializationCacheSize == 0 {
		// When the cache size is not explicitly set, estimate it based on the
		// target memory usage.
		glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.TargetRAMMB)

		// This heuristic tries to infer the maximum number of nodes in the
		// cluster from memory capacity and sets the cache size based on that
		// value.
		// From our documentation, we officially recommend 120GB machines for
		// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
		// capacity per node.
		// TODO: We may consider deciding that some percentage of memory will
		// be used for the deserialization cache and divide it by the max object
		// size to compute its size. We may even go further and measure
		// collective sizes of the objects in the cache.
		clusterSize := s.TargetRAMMB / 60
		s.StorageConfig.DeserializationCacheSize = 25 * clusterSize
		if s.StorageConfig.DeserializationCacheSize < 1000 {
			s.StorageConfig.DeserializationCacheSize = 1000
		}
	}

	storageGroupsToEncodingVersion, err := s.StorageGroupsToEncodingVersion()
	if err != nil {
		glog.Fatalf("error generating storage version map: %s", err)
	}
	storageFactory, err := genericapiserver.BuildDefaultStorageFactory(
		s.StorageConfig, s.DefaultStorageMediaType, api.Codecs,
		genericapiserver.NewDefaultResourceEncodingConfig(), storageGroupsToEncodingVersion,
		// FIXME: this GroupVersionResource override should be configurable
		[]unversioned.GroupVersionResource{batch.Resource("scheduledjobs").WithVersion("v2alpha1")},
		master.DefaultAPIResourceConfigSource(), s.RuntimeConfig)
	if err != nil {
		glog.Fatalf("error in initializing storage factory: %s", err)
	}
	storageFactory.AddCohabitatingResources(batch.Resource("jobs"), extensions.Resource("jobs"))
	storageFactory.AddCohabitatingResources(autoscaling.Resource("horizontalpodautoscalers"), extensions.Resource("horizontalpodautoscalers"))
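	// Apply any per-resource etcd server overrides, given as "group/resource#servers",
	// where servers is a semicolon-separated list of etcd endpoints.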
	for _, override := range s.EtcdServersOverrides {
		tokens := strings.Split(override, "#")
		if len(tokens) != 2 {
			glog.Errorf("invalid value of etcd server overrides: %s", override)
			continue
		}

		apiresource := strings.Split(tokens[0], "/")
		if len(apiresource) != 2 {
			glog.Errorf("invalid resource definition: %s", tokens[0])
			continue
		}
		group := apiresource[0]
		resource := apiresource[1]
		groupResource := unversioned.GroupResource{Group: group, Resource: resource}

		servers := strings.Split(tokens[1], ";")
		storageFactory.SetEtcdLocation(groupResource, servers)
	}

	// Default to the private server key for service account token signing
	if len(s.ServiceAccountKeyFiles) == 0 && s.TLSPrivateKeyFile != "" {
		if authenticator.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
			s.ServiceAccountKeyFiles = []string{s.TLSPrivateKeyFile}
		} else {
			glog.Warning("No TLS key provided, service account token authentication disabled")
		}
	}

	var serviceAccountGetter serviceaccount.ServiceAccountTokenGetter
	if s.ServiceAccountLookup {
		// If we need to look up service accounts and tokens,
		// go directly to etcd to avoid recursive auth insanity
		storageConfig, err := storageFactory.NewConfig(api.Resource("serviceaccounts"))
		if err != nil {
			glog.Fatalf("Unable to get serviceaccounts storage: %v", err)
		}
		serviceAccountGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource("serviceaccounts")), storageFactory.ResourcePrefix(api.Resource("secrets")))
	}

	apiAuthenticator, err := authenticator.New(authenticator.AuthenticatorConfig{
		Anonymous:                   s.AnonymousAuth,
		AnyToken:                    s.EnableAnyToken,
		BasicAuthFile:               s.BasicAuthFile,
		ClientCAFile:                s.ClientCAFile,
		TokenAuthFile:               s.TokenAuthFile,
		OIDCIssuerURL:               s.OIDCIssuerURL,
		OIDCClientID:                s.OIDCClientID,
		OIDCCAFile:                  s.OIDCCAFile,
		OIDCUsernameClaim:           s.OIDCUsernameClaim,
		OIDCGroupsClaim:             s.OIDCGroupsClaim,
		ServiceAccountKeyFiles:      s.ServiceAccountKeyFiles,
		ServiceAccountLookup:        s.ServiceAccountLookup,
		ServiceAccountTokenGetter:   serviceAccountGetter,
		KeystoneURL:                 s.KeystoneURL,
		WebhookTokenAuthnConfigFile: s.WebhookTokenAuthnConfigFile,
		WebhookTokenAuthnCacheTTL:   s.WebhookTokenAuthnCacheTTL,
	})

	if err != nil {
		glog.Fatalf("Invalid Authentication Config: %v", err)
	}

	authorizationModeNames := strings.Split(s.AuthorizationMode, ",")

	modeEnabled := func(mode string) bool {
		for _, m := range authorizationModeNames {
			if m == mode {
				return true
			}
		}
		return false
	}

	authorizationConfig := authorizer.AuthorizationConfig{
		PolicyFile:                  s.AuthorizationPolicyFile,
		WebhookConfigFile:           s.AuthorizationWebhookConfigFile,
		WebhookCacheAuthorizedTTL:   s.AuthorizationWebhookCacheAuthorizedTTL,
		WebhookCacheUnauthorizedTTL: s.AuthorizationWebhookCacheUnauthorizedTTL,
		RBACSuperUser:               s.AuthorizationRBACSuperUser,
	}
	if modeEnabled(genericoptions.ModeRBAC) {
		mustGetRESTOptions := func(resource string) generic.RESTOptions {
			config, err := storageFactory.NewConfig(rbac.Resource(resource))
			if err != nil {
				glog.Fatalf("Unable to get %s storage: %v", resource, err)
			}
			return generic.RESTOptions{StorageConfig: config, Decorator: generic.UndecoratedStorage, ResourcePrefix: storageFactory.ResourcePrefix(rbac.Resource(resource))}
		}

		// For initial bootstrapping go directly to etcd to avoid privilege escalation check.
		authorizationConfig.RBACRoleRegistry = role.NewRegistry(roleetcd.NewREST(mustGetRESTOptions("roles")))
		authorizationConfig.RBACRoleBindingRegistry = rolebinding.NewRegistry(rolebindingetcd.NewREST(mustGetRESTOptions("rolebindings")))
		authorizationConfig.RBACClusterRoleRegistry = clusterrole.NewRegistry(clusterroleetcd.NewREST(mustGetRESTOptions("clusterroles")))
		authorizationConfig.RBACClusterRoleBindingRegistry = clusterrolebinding.NewRegistry(clusterrolebindingetcd.NewREST(mustGetRESTOptions("clusterrolebindings")))
	}

	apiAuthorizer, err := authorizer.NewAuthorizerFromAuthorizationConfig(authorizationModeNames, authorizationConfig)
	if err != nil {
		glog.Fatalf("Invalid Authorization Config: %v", err)
	}

	admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
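	// Generate a random bearer token that the API server's own loopback clients
	// use to authenticate back to the API server with privileged credentials.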
	privilegedLoopbackToken := uuid.NewRandom().String()

	selfClientConfig, err := s.NewSelfClientConfig(privilegedLoopbackToken)
	if err != nil {
		glog.Fatalf("Failed to create clientset: %v", err)
	}
	client, err := s.NewSelfClient(privilegedLoopbackToken)
	if err != nil {
		glog.Errorf("Failed to create clientset: %v", err)
	}

	// TODO(dims): We probably need to add an option "EnableLoopbackToken"
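	// Wire the loopback token into both authentication and authorization so that
	// requests presenting it are authenticated as the API server itself and granted
	// system-privileged access.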
	if apiAuthenticator != nil {
		var uid = uuid.NewRandom().String()
		tokens := make(map[string]*user.DefaultInfo)
		tokens[privilegedLoopbackToken] = &user.DefaultInfo{
			Name:   user.APIServerUser,
			UID:    uid,
			Groups: []string{user.SystemPrivilegedGroup},
		}

		tokenAuthenticator := authenticator.NewAuthenticatorFromTokens(tokens)
		apiAuthenticator = authenticatorunion.New(tokenAuthenticator, apiAuthenticator)

		tokenAuthorizer := authorizer.NewPrivilegedGroups(user.SystemPrivilegedGroup)
		apiAuthorizer = authorizerunion.New(tokenAuthorizer, apiAuthorizer)
	}

	sharedInformers := informers.NewSharedInformerFactory(client, 10*time.Minute)
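	// The plugin initializer hands shared dependencies to admission plugins that ask
	// for them; injecting the authorizer lets a plugin perform authorization checks
	// directly instead of issuing a SubjectAccessReview back to the API server.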
	pluginInitializer := admission.NewPluginInitializer(sharedInformers, apiAuthorizer)

	admissionController, err := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile, pluginInitializer)
	if err != nil {
		glog.Fatalf("Failed to initialize plugins: %v", err)
	}

	genericConfig := genericapiserver.NewConfig(s.ServerRunOptions)
	// TODO: Move the following to generic api server as well.
	genericConfig.LoopbackClientConfig = selfClientConfig
	genericConfig.Authenticator = apiAuthenticator
	genericConfig.SupportsBasicAuth = len(s.BasicAuthFile) > 0
	genericConfig.Authorizer = apiAuthorizer
	genericConfig.AuthorizerRBACSuperUser = s.AuthorizationRBACSuperUser
	genericConfig.AdmissionControl = admissionController
	genericConfig.APIResourceConfigSource = storageFactory.APIResourceConfigSource
	genericConfig.MasterServiceNamespace = s.MasterServiceNamespace
	genericConfig.ProxyDialer = proxyDialerFn
	genericConfig.ProxyTLSClientConfig = proxyTLSClientConfig
	genericConfig.Serializer = api.Codecs
	genericConfig.OpenAPIConfig.Info.Title = "Kubernetes"
	genericConfig.OpenAPIConfig.Definitions = generatedopenapi.OpenAPIDefinitions
	genericConfig.OpenAPIConfig.GetOperationID = openapi.GetOperationID
	genericConfig.EnableOpenAPISupport = true

	config := &master.Config{
		GenericConfig: genericConfig,

		StorageFactory:          storageFactory,
		EnableWatchCache:        s.EnableWatchCache,
		EnableCoreControllers:   true,
		DeleteCollectionWorkers: s.DeleteCollectionWorkers,
		EventTTL:                s.EventTTL,
		KubeletClient:           kubeletClient,
		EnableUISupport:         true,
		EnableLogsSupport:       true,

		Tunneler: tunneler,
	}

	if s.EnableWatchCache {
glog.V(2).Infof("Initalizing cache sizes based on %dMB limit", s.TargetRAMMB)
|
|
cachesize.InitializeWatchCacheSizes(s.TargetRAMMB)
|
|
cachesize.SetWatchCacheSizes(s.WatchCacheSizes)
|
|
}
|
|
|
|
m, err := config.Complete().New()
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
sharedInformers.Start(wait.NeverStop)
|
|
m.GenericAPIServer.Run()
|
|
return nil
|
|
}
|