From ca7f8973f79cfb549896d65fc6a88d298df37155 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Thu, 3 Aug 2017 15:26:41 +0300 Subject: [PATCH 01/10] RBAC work on PoC --- .../juju/layers/kubernetes-master/config.yaml | 4 + .../reactive/kubernetes_master.py | 145 ++++++++++++++++-- .../templates/heapster-rbac.yaml | 58 +++++++ .../nginx-ingress-controller-rbac.yml | 127 +++++++++++++++ .../reactive/kubernetes_worker.py | 37 ++++- .../templates/default-backend.yml | 51 ++++++ .../nginx-ingress-controller-service.yml | 16 ++ .../templates/nginx-ingress-controller.yml | 66 ++++++++ 8 files changed, 479 insertions(+), 25 deletions(-) create mode 100644 cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml create mode 100644 cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml create mode 100644 cluster/juju/layers/kubernetes-worker/templates/default-backend.yml create mode 100644 cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml create mode 100644 cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index c328a43751c..aca3f31ea0a 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -40,3 +40,7 @@ options: runtime-config=batch/v2alpha1=true profiling=true will result in kube-apiserver being run with the following options: --runtime-config=batch/v2alpha1=true --profiling=true + enable-rbac: + type: boolean + default: True + description: Enable RBAC authorization mode. 
\ No newline at end of file diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index b6430637d47..f910020e633 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -209,12 +209,10 @@ def setup_leader_authentication(): if not get_keys_from_leader(keys) \ or is_state('reconfigure.authentication.setup'): last_pass = get_password('basic_auth.csv', 'admin') - setup_basic_auth(last_pass, 'admin', 'admin') + setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters') if not os.path.isfile(known_tokens): - setup_tokens(None, 'admin', 'admin') - setup_tokens(None, 'kubelet', 'kubelet') - setup_tokens(None, 'kube_proxy', 'kube_proxy') + touch(known_tokens) # Generate the default service account token key os.makedirs('/root/cdk', exist_ok=True) @@ -400,19 +398,63 @@ def send_cluster_dns_detail(kube_control): @when('kube-control.auth.requested') -@when('authentication.setup') +@when('snap.installed.kubectl') @when('leadership.is_leader') -def send_tokens(kube_control): - """Send the tokens to the workers.""" - kubelet_token = get_token('kubelet') - proxy_token = get_token('kube_proxy') - admin_token = get_token('admin') +def create_service_configs(kube_control): + """Create the users for kubelet""" + # generate the username/pass for the requesting unit + proxy_token = get_token('system:kube-proxy') + if not proxy_token: + setup_tokens(None, 'system:kube-proxy', 'kube-proxy') + proxy_token = get_token('system:kube-proxy') + + client_token = get_token('admin') + if not client_token: + setup_tokens(None, 'admin', 'admin', "system:masters") + client_token = get_token('admin') - # Send the data requests = kube_control.auth_user() for request in requests: - kube_control.sign_auth_request(request[0], kubelet_token, - proxy_token, admin_token) + username = request[1]['user'] + 
group = request[1]['group'] + kubelet_token = get_token(username) + if not kubelet_token: + # Usernames have to be in the form of system:node: + userid = "kubelet-{}".format(request[0].split('/')[1]) + setup_tokens(None, username, userid, group) + kubelet_token = get_token(username) + + kube_control.sign_auth_request(request[0], username, + kubelet_token, proxy_token, client_token) + + host.service_restart('snap.kube-apiserver.daemon') + remove_state('authentication.setup') + + +@when('kube-control.departed') +@when('leadership.is_leader') +def flush_auth_for_departed(kube_control): + ''' Unit has left the cluster and needs to have its authentication + tokens removed from the token registry ''' + token_auth_file = '/root/cdk/known_tokens.csv' + departing_unit = kube_control.flush_departed() + userid = "kubelet-{}".format(departing_unit.split('/')[1]) + known_tokens = open(token_auth_file, 'r').readlines() + for line in known_tokens[:]: + haystack = line.split(',') + # skip the entry if we dont have token,user,id,groups format + if len(haystack) < 4: + continue + if haystack[2] == userid: + hookenv.log('Found unit {} in token auth. 
Removing auth' + ' token.'.format(userid)) + known_tokens.remove(line) + # atomically rewrite the file minus any scrubbed units + hookenv.log('Rewriting token auth file: {}'.format(token_auth_file)) + with open(token_auth_file, 'w') as fp: + fp.writelines(known_tokens) + # Trigger rebroadcast of auth files for followers + remove_state('autentication.setup') @when_not('kube-control.connected') @@ -492,6 +534,7 @@ def addons_ready(): """ try: + apply_rbac() check_call(['cdk-addons.apply']) return True except CalledProcessError: @@ -614,6 +657,52 @@ def initial_nrpe_config(nagios=None): update_nrpe_config(nagios) +@when('config.changed.enable-rbac', + 'kubernetes-master.components.started') +def enable_rbac_config(): + config = hookenv.config() + if data_changed('rbac-flag', str(config.get('enable-rbac'))): + remove_state('kubernetes-master.components.started') + + +def apply_rbac(): + # TODO(kjackal): we should be checking if rbac is already applied + config = hookenv.config() + if is_state('leadership.is_leader'): + if config.get('enable-rbac'): + try: + cmd = ['kubectl', 'apply', '-f', 'templates/heapster-rbac.yaml'] + check_output(cmd).decode('utf-8') + except CalledProcessError: + hookenv.log('Failed to apply heapster rbac rules') + try: + cmd = ['kubectl', 'apply', '-f', 'templates/nginx-ingress-controller-rbac.yml'] + check_output(cmd).decode('utf-8') + except CalledProcessError: + hookenv.log('Failed to apply heapster rbac rules') + + # TODO(kjackal): The follwoing is wrong and imposes security risk. 
What we should be doing is + # update the add-ons to include an rbac enabled dashboard + try: + cmd = "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin" \ + " --serviceaccount=kube-system:default".split(' ') + check_output(cmd).decode('utf-8') + except CalledProcessError: + hookenv.log('Failed to elevate credentials') + + else: + try: + cmd = ['kubectl', 'delete', '-f', 'templates/heapster-rbac.yaml'] + check_output(cmd).decode('utf-8') + except CalledProcessError: + hookenv.log('Failed to delete heapster rbac rules') + try: + cmd = ['kubectl', 'delete', '-f', 'templates/nginx-ingress-controller-rbac.yml'] + check_output(cmd).decode('utf-8') + except CalledProcessError: + hookenv.log('Failed to apply heapster rbac rules') + + @when('kubernetes-master.components.started') @when('nrpe-external-master.available') @when_any('config.changed.nagios_context', @@ -965,6 +1054,12 @@ def configure_apiserver(): 'DefaultTolerationSeconds' ] + if hookenv.config('enable-rbac'): + admission_control.append('NodeRestriction') + api_opts.add('authorization-mode', 'Node,RBAC', strict=True) + else: + api_opts.add('authorization-mode', 'AlwaysAllow', strict=True) + if get_version('kube-apiserver') < (1, 6): hookenv.log('Removing DefaultTolerationSeconds from admission-control') admission_control.remove('DefaultTolerationSeconds') @@ -1020,7 +1115,8 @@ def configure_scheduler(): set_state('kube-scheduler.do-restart') -def setup_basic_auth(password=None, username='admin', uid='admin'): +def setup_basic_auth(password=None, username='admin', uid='admin', + groups=None): '''Create the htacces file and the tokens.''' root_cdk = '/root/cdk' if not os.path.isdir(root_cdk): @@ -1029,10 +1125,14 @@ def setup_basic_auth(password=None, username='admin', uid='admin'): if not password: password = token_generator() with open(htaccess, 'w') as stream: - stream.write('{0},{1},{2}'.format(password, username, uid)) + if groups: + 
stream.write('{0},{1},{2},"{3}"'.format(password, + username, uid, groups)) + else: + stream.write('{0},{1},{2}'.format(password, username, uid)) -def setup_tokens(token, username, user): +def setup_tokens(token, username, user, groups=None): '''Create a token file for kubernetes authentication.''' root_cdk = '/root/cdk' if not os.path.isdir(root_cdk): @@ -1041,7 +1141,11 @@ def setup_tokens(token, username, user): if not token: token = token_generator() with open(known_tokens, 'a') as stream: - stream.write('{0},{1},{2}\n'.format(token, username, user)) + if groups: + stream.write('{0},{1},{2},"{3}"\n'.format(token, + username, user, groups)) + else: + stream.write('{0},{1},{2}\n'.format(token, username, user)) def get_password(csv_fname, user): @@ -1107,3 +1211,10 @@ def apiserverVersion(): cmd = 'kube-apiserver --version'.split() version_string = check_output(cmd).decode('utf-8') return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3]) + + +def touch(fname): + try: + os.utime(fname, None) + except OSError: + open(fname, 'a').close() \ No newline at end of file diff --git a/cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml b/cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml new file mode 100644 index 00000000000..58fa1b9921b --- /dev/null +++ b/cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml @@ -0,0 +1,58 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: heapster-binding + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:heapster +subjects: +- kind: ServiceAccount + name: heapster + namespace: kube-system +--- +# Heapster's pod_nanny monitors the heapster deployment & its pod(s), and scales +# the resources of the deployment if necessary. 
+apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: system:pod-nanny + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "extensions" + resources: + - deployments + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: heapster-binding + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: system:pod-nanny +subjects: +- kind: ServiceAccount + name: heapster + namespace: kube-system +--- diff --git a/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml b/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml new file mode 100644 index 00000000000..696f4c6e076 --- /dev/null +++ b/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml @@ -0,0 +1,127 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress-serviceaccount + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: nginx-ingress-role + namespace: 
kube-system +rules: + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + - "ingress-controller-leader-nginx" + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - create + - update +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: nginx-ingress-role-nisa-binding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: nginx-ingress-clusterrole-nisa-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress-clusterrole +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: kube-system diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 2f5707790b6..60129618b7a 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -47,6 +47,8 @@ from charmhelpers.contrib.charmsupport import nrpe nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' kubeconfig_path = '/root/cdk/kubeconfig' +kubeproxyconfig_path = '/root/cdk/kubeproxyconfig' +kubeclientconfig_path = '/root/.kube/config' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') @@ -319,7 +321,8 @@ def watch_for_changes(kube_api, kube_control, cni): 'tls_client.client.key.saved', 
'tls_client.server.certificate.saved', 'tls_client.server.key.saved', 'kube-control.dns.available', 'kube-control.auth.available', - 'cni.available', 'kubernetes-worker.restart-needed') + 'cni.available', 'kubernetes-worker.restart-needed', + 'worker.auth.bootstrapped') def start_worker(kube_api, kube_control, auth_control, cni): ''' Start kubelet using the provided API and DNS info.''' servers = get_kube_api_servers(kube_api) @@ -335,7 +338,7 @@ def start_worker(kube_api, kube_control, auth_control, cni): hookenv.log('Waiting for cluster cidr.') return - creds = kube_control.get_auth_credentials() + creds = db.get('credentials') data_changed('kube-control.creds', creds) # set --allow-privileged flag for kubelet @@ -389,6 +392,8 @@ def render_and_launch_ingress(): '/root/cdk/addons/default-http-backend.yaml') kubectl_manifest('delete', '/root/cdk/addons/ingress-replication-controller.yaml') # noqa + kubectl_manifest('delete', + '/root/cdk/addons/ingress-replication-controller-service.yaml') # noqa hookenv.close_port(80) hookenv.close_port(443) @@ -458,11 +463,13 @@ def create_config(server, creds): cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube'] check_call(cmd) # Create kubernetes configuration in the default location for root. - create_kubeconfig('/root/.kube/config', server, ca, + create_kubeconfig(kubeclientconfig_path, server, ca, token=creds['client_token'], user='root') # Create kubernetes configuration for kubelet, and kube-proxy services. 
create_kubeconfig(kubeconfig_path, server, ca, token=creds['kubelet_token'], user='kubelet') + create_kubeconfig(kubeproxyconfig_path, server, ca, + token=creds['proxy_token'], user='kube-proxy') def configure_worker_services(api_servers, dns, cluster_cidr): @@ -491,7 +498,7 @@ def configure_worker_services(api_servers, dns, cluster_cidr): kube_proxy_opts = FlagManager('kube-proxy') kube_proxy_opts.add('cluster-cidr', cluster_cidr) - kube_proxy_opts.add('kubeconfig', kubeconfig_path) + kube_proxy_opts.add('kubeconfig', kubeproxyconfig_path) kube_proxy_opts.add('logtostderr', 'true') kube_proxy_opts.add('v', '0') kube_proxy_opts.add('master', random.choice(api_servers), strict=True) @@ -556,7 +563,7 @@ def launch_default_ingress_controller(): # Render the default http backend (404) replicationcontroller manifest manifest = addon_path.format('default-http-backend.yaml') - render('default-http-backend.yaml', manifest, context) + render('default-backend.yml', manifest, context) hookenv.log('Creating the default http backend.') try: kubectl('apply', '-f', manifest) @@ -574,7 +581,7 @@ def launch_default_ingress_controller(): context['ingress_image'] = \ "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" manifest = addon_path.format('ingress-replication-controller.yaml') - render('ingress-replication-controller.yaml', manifest, context) + render('nginx-ingress-controller.yml', manifest, context) hookenv.log('Creating the ingress replication controller.') try: kubectl('apply', '-f', manifest) @@ -584,6 +591,17 @@ def launch_default_ingress_controller(): hookenv.close_port(80) hookenv.close_port(443) return + manifest = addon_path.format('ingress-replication-controller-service.yaml') + render('nginx-ingress-controller-service.yml', manifest, context) + hookenv.log('Creating the ingress replication controller service.') + try: + kubectl('apply', '-f', manifest) + except CalledProcessError as e: + hookenv.log(e) + hookenv.log('Failed to create ingress controller 
service. Will attempt again next update.') # noqa + hookenv.close_port(80) + hookenv.close_port(443) + return set_state('kubernetes-worker.ingress.available') hookenv.open_port(80) @@ -613,7 +631,7 @@ def get_kube_api_servers(kube_api): def kubectl(*args): ''' Run a kubectl cli command with a config file. Returns stdout and throws an error if the command fails. ''' - command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args) + command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args) hookenv.log('Executing {}'.format(command)) return check_output(command) @@ -821,7 +839,10 @@ def request_kubelet_and_proxy_credentials(kube_control): def catch_change_in_creds(kube_control): """Request a service restart in case credential updates were detected.""" creds = kube_control.get_auth_credentials() - if data_changed('kube-control.creds', creds): + nodeuser = 'system:node:{}'.format(gethostname()) + if data_changed('kube-control.creds', creds) and creds['user'] == nodeuser: + db.set('credentials', creds) + set_state('worker.auth.bootstrapped') set_state('kubernetes-worker.restart-needed') diff --git a/cluster/juju/layers/kubernetes-worker/templates/default-backend.yml b/cluster/juju/layers/kubernetes-worker/templates/default-backend.yml new file mode 100644 index 00000000000..3c40989a31e --- /dev/null +++ b/cluster/juju/layers/kubernetes-worker/templates/default-backend.yml @@ -0,0 +1,51 @@ +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: default-http-backend + labels: + k8s-app: default-http-backend + namespace: kube-system +spec: + replicas: 1 + template: + metadata: + labels: + k8s-app: default-http-backend + spec: + terminationGracePeriodSeconds: 60 + containers: + - name: default-http-backend + # Any image is permissable as long as: + # 1. It serves a 404 page at / + # 2. 
It serves 200 on a /healthz endpoint + image: gcr.io/google_containers/defaultbackend:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: default-http-backend + namespace: kube-system + labels: + k8s-app: default-http-backend +spec: + ports: + - port: 80 + targetPort: 8080 + selector: + k8s-app: default-http-backend diff --git a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml new file mode 100644 index 00000000000..ad8b79df13d --- /dev/null +++ b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: nginx-ingress + namespace: kube-system +spec: +# Can also use LoadBalancer type + type: NodePort + ports: + - name: http + port: 8080 + nodePort: 30080 + targetPort: 80 + protocol: TCP + selector: + k8s-app: nginx-ingress-controller diff --git a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml new file mode 100644 index 00000000000..206fc3b6577 --- /dev/null +++ b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx-load-balancer-conf + namespace: kube-system +--- +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: nginx-ingress-controller + labels: + k8s-app: nginx-ingress-controller + namespace: kube-system +spec: + replicas: 1 + selector: + matchLabels: + k8s-app: nginx-ingress-controller + template: + metadata: + labels: + k8s-app: nginx-ingress-controller + annotations: + 
prometheus.io/port: '10254' + prometheus.io/scrape: 'true' + spec: + # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration + # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host + # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used + # like with kubeadm + hostNetwork: true + terminationGracePeriodSeconds: 60 + serviceAccountName: nginx-ingress-serviceaccount + containers: + - name: nginx-ingress-controller + image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.11 + readinessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + livenessProbe: + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + timeoutSeconds: 1 + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + args: + - /nginx-ingress-controller + - --default-backend-service=$(POD_NAMESPACE)/default-http-backend + - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf + ports: + - containerPort: 80 + hostPort: 80 + - containerPort: 443 + hostPort: 443 From 67e2f2881b2b1fe5ead228981b5b706ac32584ae Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Fri, 4 Aug 2017 12:15:45 +0300 Subject: [PATCH 02/10] Update worker actions to use client creds --- cluster/juju/layers/kubernetes-worker/actions/microbot | 2 +- cluster/juju/layers/kubernetes-worker/actions/pause | 4 ++-- cluster/juju/layers/kubernetes-worker/actions/registry | 2 +- cluster/juju/layers/kubernetes-worker/actions/resume | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cluster/juju/layers/kubernetes-worker/actions/microbot b/cluster/juju/layers/kubernetes-worker/actions/microbot index 0306747061f..41663f253bc 
100755 --- a/cluster/juju/layers/kubernetes-worker/actions/microbot +++ b/cluster/juju/layers/kubernetes-worker/actions/microbot @@ -34,7 +34,7 @@ if not context['replicas']: context['replicas'] = 3 # Declare a kubectl template when invoking kubectl -kubectl = ['kubectl', '--kubeconfig=/root/cdk/kubeconfig'] +kubectl = ['kubectl', '--kubeconfig=/root/.kube/config'] # Remove deployment if requested if context['delete']: diff --git a/cluster/juju/layers/kubernetes-worker/actions/pause b/cluster/juju/layers/kubernetes-worker/actions/pause index 7f1c66e8b73..82b3d3838dd 100755 --- a/cluster/juju/layers/kubernetes-worker/actions/pause +++ b/cluster/juju/layers/kubernetes-worker/actions/pause @@ -21,8 +21,8 @@ fi # Cordon and drain the unit -kubectl --kubeconfig=/root/cdk/kubeconfig cordon $(hostname) -kubectl --kubeconfig=/root/cdk/kubeconfig drain $(hostname) ${EXTRA_FLAGS} +kubectl --kubeconfig=/root/.kube/config cordon $(hostname) +kubectl --kubeconfig=/root/.kube/config drain $(hostname) ${EXTRA_FLAGS} # Set status to indicate the unit is paused and under maintenance. 
status-set 'waiting' 'Kubernetes unit paused' diff --git a/cluster/juju/layers/kubernetes-worker/actions/registry b/cluster/juju/layers/kubernetes-worker/actions/registry index a99a0d5732e..11d57ce8835 100755 --- a/cluster/juju/layers/kubernetes-worker/actions/registry +++ b/cluster/juju/layers/kubernetes-worker/actions/registry @@ -57,7 +57,7 @@ if param_error: context['ingress'] = action_get('ingress') # Declare a kubectl template when invoking kubectl -kubectl = ['kubectl', '--kubeconfig=/root/cdk/kubeconfig'] +kubectl = ['kubectl', '--kubeconfig=/root/.kube/config'] # Remove deployment if requested if deletion: diff --git a/cluster/juju/layers/kubernetes-worker/actions/resume b/cluster/juju/layers/kubernetes-worker/actions/resume index 6131e8e037b..f7ef0a17f99 100755 --- a/cluster/juju/layers/kubernetes-worker/actions/resume +++ b/cluster/juju/layers/kubernetes-worker/actions/resume @@ -4,5 +4,5 @@ set -ex export PATH=$PATH:/snap/bin -kubectl --kubeconfig=/root/cdk/kubeconfig uncordon $(hostname) +kubectl --kubeconfig=/root/.kube/config uncordon $(hostname) status-set 'active' 'Kubernetes unit resumed' From deb68518a2417677e9561aa39ac06901a064e05d Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Sat, 5 Aug 2017 01:03:23 +0300 Subject: [PATCH 03/10] Use new kube-control interface --- .../layers/kubernetes-worker/reactive/kubernetes_worker.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 60129618b7a..1aaa04b1399 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -838,9 +838,11 @@ def request_kubelet_and_proxy_credentials(kube_control): @when('kube-control.auth.available') def catch_change_in_creds(kube_control): """Request a service restart in case credential updates were 
detected.""" - creds = kube_control.get_auth_credentials() nodeuser = 'system:node:{}'.format(gethostname()) - if data_changed('kube-control.creds', creds) and creds['user'] == nodeuser: + creds = kube_control.get_auth_credentials(nodeuser) + if creds \ + and data_changed('kube-control.creds', creds) \ + and creds['user'] == nodeuser: db.set('credentials', creds) set_state('worker.auth.bootstrapped') set_state('kubernetes-worker.restart-needed') From 6e4814e9485893de51ce723dabd92349d52713f9 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Mon, 7 Aug 2017 15:05:12 +0300 Subject: [PATCH 04/10] Update e2e to use new control interface --- .../kubernetes-e2e/reactive/kubernetes_e2e.py | 31 ++++++++++++++----- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py b/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py index 76a97aa0d63..b982dc65ec3 100644 --- a/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py +++ b/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py @@ -22,8 +22,9 @@ from charms.reactive import is_state from charms.reactive import set_state from charms.reactive import when from charms.reactive import when_not +from charms.reactive.helpers import data_changed -from charmhelpers.core import hookenv +from charmhelpers.core import hookenv, unitdata from shlex import split @@ -31,6 +32,9 @@ from subprocess import check_call from subprocess import check_output +db = unitdata.kv() + + @hook('upgrade-charm') def reset_delivery_states(): ''' Remove the state set when resources are unpacked. 
''' @@ -87,15 +91,16 @@ def install_snaps(): @when('tls_client.ca.saved', 'tls_client.client.certificate.saved', 'tls_client.client.key.saved', 'kubernetes-master.available', - 'kubernetes-e2e.installed', 'kube-control.auth.available') + 'kubernetes-e2e.installed', 'e2e.auth.bootstrapped') @when_not('kubeconfig.ready') -def prepare_kubeconfig_certificates(master, kube_control): +def prepare_kubeconfig_certificates(master): ''' Prepare the data to feed to create the kubeconfig file. ''' layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. ca = layer_options.get('ca_certificate_path') - creds = kube_control.get_auth_credentials() + creds = db.get('credentials') + data_changed('kube-control.creds', creds) servers = get_kube_api_servers(master) @@ -118,13 +123,23 @@ def prepare_kubeconfig_certificates(master, kube_control): def request_credentials(kube_control): """ Request authorization creds.""" - # The kube-cotrol interface is created to support RBAC. 
- # At this point we might as well do the right thing and return the hostname - # even if it will only be used when we enable RBAC - user = 'system:masters' + # Ask for a user, although we will be using the 'client_token' + user = 'system:e2e' kube_control.set_auth_request(user) +@when('kube-control.auth.available') +def catch_change_in_creds(kube_control): + """Request a service restart in case credential updates were detected.""" + user = 'system:e2e' + creds = kube_control.get_auth_credentials(user) + if creds \ + and data_changed('kube-control.creds', creds) \ + and creds['user'] == user: + db.set('credentials', creds) + set_state('e2e.auth.bootstrapped') + + @when('kubernetes-e2e.installed', 'kubeconfig.ready') def set_app_version(): ''' Declare the application version to juju ''' From 1c7f4cf363c382925844059c4fe13d9e399df163 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 8 Aug 2017 14:09:03 +0300 Subject: [PATCH 05/10] Fix ingress and microbot --- .../nginx-ingress-controller-rbac.yml | 10 +-- .../reactive/kubernetes_worker.py | 17 +---- .../templates/default-backend.yml | 51 -------------- .../templates/default-http-backend.yaml | 26 +++++--- .../nginx-ingress-controller-service.yml | 16 ----- .../templates/nginx-ingress-controller.yml | 66 ------------------- 6 files changed, 23 insertions(+), 163 deletions(-) delete mode 100644 cluster/juju/layers/kubernetes-worker/templates/default-backend.yml delete mode 100644 cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml delete mode 100644 cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml diff --git a/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml b/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml index 696f4c6e076..5b039282353 100644 --- a/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml +++ 
b/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: nginx-ingress-serviceaccount - namespace: kube-system + namespace: default --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole @@ -60,7 +60,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: nginx-ingress-role - namespace: kube-system + namespace: default rules: - apiGroups: - "" @@ -103,7 +103,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: RoleBinding metadata: name: nginx-ingress-role-nisa-binding - namespace: kube-system + namespace: default roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -111,7 +111,7 @@ roleRef: subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount - namespace: kube-system + namespace: default --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding @@ -124,4 +124,4 @@ roleRef: subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount - namespace: kube-system + namespace: default diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index 1aaa04b1399..d5d5a6d3b63 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -392,8 +392,6 @@ def render_and_launch_ingress(): '/root/cdk/addons/default-http-backend.yaml') kubectl_manifest('delete', '/root/cdk/addons/ingress-replication-controller.yaml') # noqa - kubectl_manifest('delete', - '/root/cdk/addons/ingress-replication-controller-service.yaml') # noqa hookenv.close_port(80) hookenv.close_port(443) @@ -563,7 +561,7 @@ def launch_default_ingress_controller(): # Render the default http backend (404) replicationcontroller manifest manifest = addon_path.format('default-http-backend.yaml') - render('default-backend.yml', manifest, context) + 
render('default-http-backend.yaml', manifest, context) hookenv.log('Creating the default http backend.') try: kubectl('apply', '-f', manifest) @@ -581,7 +579,7 @@ def launch_default_ingress_controller(): context['ingress_image'] = \ "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" manifest = addon_path.format('ingress-replication-controller.yaml') - render('nginx-ingress-controller.yml', manifest, context) + render('ingress-replication-controller.yaml', manifest, context) hookenv.log('Creating the ingress replication controller.') try: kubectl('apply', '-f', manifest) @@ -591,17 +589,6 @@ def launch_default_ingress_controller(): hookenv.close_port(80) hookenv.close_port(443) return - manifest = addon_path.format('ingress-replication-controller-service.yaml') - render('nginx-ingress-controller-service.yml', manifest, context) - hookenv.log('Creating the ingress replication controller service.') - try: - kubectl('apply', '-f', manifest) - except CalledProcessError as e: - hookenv.log(e) - hookenv.log('Failed to create ingress controller service. Will attempt again next update.') # noqa - hookenv.close_port(80) - hookenv.close_port(443) - return set_state('kubernetes-worker.ingress.available') hookenv.open_port(80) diff --git a/cluster/juju/layers/kubernetes-worker/templates/default-backend.yml b/cluster/juju/layers/kubernetes-worker/templates/default-backend.yml deleted file mode 100644 index 3c40989a31e..00000000000 --- a/cluster/juju/layers/kubernetes-worker/templates/default-backend.yml +++ /dev/null @@ -1,51 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: default-http-backend - labels: - k8s-app: default-http-backend - namespace: kube-system -spec: - replicas: 1 - template: - metadata: - labels: - k8s-app: default-http-backend - spec: - terminationGracePeriodSeconds: 60 - containers: - - name: default-http-backend - # Any image is permissable as long as: - # 1. It serves a 404 page at / - # 2. 
It serves 200 on a /healthz endpoint - image: gcr.io/google_containers/defaultbackend:1.0 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: default-http-backend - namespace: kube-system - labels: - k8s-app: default-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - k8s-app: default-http-backend diff --git a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml index 02500dc679d..739ae2758a2 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml @@ -1,15 +1,15 @@ -apiVersion: v1 -kind: ReplicationController +apiVersion: extensions/v1beta1 +kind: Deployment metadata: name: default-http-backend + labels: + k8s-app: default-http-backend spec: replicas: 1 - selector: - app: default-http-backend template: metadata: labels: - app: default-http-backend + k8s-app: default-http-backend spec: terminationGracePeriodSeconds: 60 containers: @@ -27,17 +27,23 @@ spec: timeoutSeconds: 5 ports: - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi --- apiVersion: v1 kind: Service metadata: name: default-http-backend labels: - app: default-http-backend + k8s-app: default-http-backend spec: ports: - - port: 80 - protocol: TCP - targetPort: 80 + - port: 80 + targetPort: 8080 selector: - app: default-http-backend + k8s-app: default-http-backend diff --git a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml deleted file mode 100644 index 
ad8b79df13d..00000000000 --- a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-service.yml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: nginx-ingress - namespace: kube-system -spec: -# Can also use LoadBalancer type - type: NodePort - ports: - - name: http - port: 8080 - nodePort: 30080 - targetPort: 80 - protocol: TCP - selector: - k8s-app: nginx-ingress-controller diff --git a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml deleted file mode 100644 index 206fc3b6577..00000000000 --- a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller.yml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: nginx-load-balancer-conf - namespace: kube-system ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: nginx-ingress-controller - labels: - k8s-app: nginx-ingress-controller - namespace: kube-system -spec: - replicas: 1 - selector: - matchLabels: - k8s-app: nginx-ingress-controller - template: - metadata: - labels: - k8s-app: nginx-ingress-controller - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - spec: - # hostNetwork makes it possible to use ipv6 and to preserve the source IP correctly regardless of docker configuration - # however, it is not a hard dependency of the nginx-ingress-controller itself and it may cause issues if port 10254 already is taken on the host - # that said, since hostPort is broken on CNI (https://github.com/kubernetes/kubernetes/issues/31307) we have to use hostNetwork where CNI is used - # like with kubeadm - hostNetwork: true - terminationGracePeriodSeconds: 60 - serviceAccountName: nginx-ingress-serviceaccount - containers: - - name: nginx-ingress-controller - image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.11 - readinessProbe: - httpGet: - path: /healthz - port: 
10254 - scheme: HTTP - livenessProbe: - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - timeoutSeconds: 1 - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-load-balancer-conf - ports: - - containerPort: 80 - hostPort: 80 - - containerPort: 443 - hostPort: 443 From eb698629df9c5ce1ba187766c5b6cf5f0656113a Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 9 Aug 2017 12:10:54 +0300 Subject: [PATCH 06/10] Use the updated (RBAC enabled) cdk-addons --- .../juju/layers/kubernetes-master/README.md | 4 + .../reactive/kubernetes_master.py | 39 ------ .../templates/heapster-rbac.yaml | 58 -------- .../nginx-ingress-controller-rbac.yml | 127 ----------------- .../ingress-replication-controller.yaml | 128 ++++++++++++++++++ .../nginx-ingress-controller-rbac.yml | 0 6 files changed, 132 insertions(+), 224 deletions(-) delete mode 100644 cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml delete mode 100644 cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml create mode 100644 cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-rbac.yml diff --git a/cluster/juju/layers/kubernetes-master/README.md b/cluster/juju/layers/kubernetes-master/README.md index c1738869a8d..c1cc84a6cf0 100644 --- a/cluster/juju/layers/kubernetes-master/README.md +++ b/cluster/juju/layers/kubernetes-master/README.md @@ -54,6 +54,10 @@ The domain name to use for the Kubernetes cluster for DNS. Enables the installation of Kubernetes dashboard, Heapster, Grafana, and InfluxDB. +#### enable-rbac + +Enable RBAC and Node authorisation. + # DNS for the cluster The DNS add-on allows the pods to have a DNS names in addition to IP addresses. 
diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index f910020e633..df78ee20d11 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -534,7 +534,6 @@ def addons_ready(): """ try: - apply_rbac() check_call(['cdk-addons.apply']) return True except CalledProcessError: @@ -665,44 +664,6 @@ def enable_rbac_config(): remove_state('kubernetes-master.components.started') -def apply_rbac(): - # TODO(kjackal): we should be checking if rbac is already applied - config = hookenv.config() - if is_state('leadership.is_leader'): - if config.get('enable-rbac'): - try: - cmd = ['kubectl', 'apply', '-f', 'templates/heapster-rbac.yaml'] - check_output(cmd).decode('utf-8') - except CalledProcessError: - hookenv.log('Failed to apply heapster rbac rules') - try: - cmd = ['kubectl', 'apply', '-f', 'templates/nginx-ingress-controller-rbac.yml'] - check_output(cmd).decode('utf-8') - except CalledProcessError: - hookenv.log('Failed to apply heapster rbac rules') - - # TODO(kjackal): The follwoing is wrong and imposes security risk. 
What we should be doing is - # update the add-ons to include an rbac enabled dashboard - try: - cmd = "kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin" \ - " --serviceaccount=kube-system:default".split(' ') - check_output(cmd).decode('utf-8') - except CalledProcessError: - hookenv.log('Failed to elevate credentials') - - else: - try: - cmd = ['kubectl', 'delete', '-f', 'templates/heapster-rbac.yaml'] - check_output(cmd).decode('utf-8') - except CalledProcessError: - hookenv.log('Failed to delete heapster rbac rules') - try: - cmd = ['kubectl', 'delete', '-f', 'templates/nginx-ingress-controller-rbac.yml'] - check_output(cmd).decode('utf-8') - except CalledProcessError: - hookenv.log('Failed to apply heapster rbac rules') - - @when('kubernetes-master.components.started') @when('nrpe-external-master.available') @when_any('config.changed.nagios_context', diff --git a/cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml b/cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml deleted file mode 100644 index 58fa1b9921b..00000000000 --- a/cluster/juju/layers/kubernetes-master/templates/heapster-rbac.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: heapster-binding - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:heapster -subjects: -- kind: ServiceAccount - name: heapster - namespace: kube-system ---- -# Heapster's pod_nanny monitors the heapster deployment & its pod(s), and scales -# the resources of the deployment if necessary. 
-apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: system:pod-nanny - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - get -- apiGroups: - - "extensions" - resources: - - deployments - verbs: - - get - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: heapster-binding - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: system:pod-nanny -subjects: -- kind: ServiceAccount - name: heapster - namespace: kube-system ---- diff --git a/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml b/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml deleted file mode 100644 index 5b039282353..00000000000 --- a/cluster/juju/layers/kubernetes-master/templates/nginx-ingress-controller-rbac.yml +++ /dev/null @@ -1,127 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nginx-ingress-serviceaccount - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: nginx-ingress-clusterrole -rules: - - apiGroups: - - "" - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - resources: - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - "extensions" - resources: - - ingresses/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: nginx-ingress-role - namespace: 
default -rules: - - apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - # Defaults to "-" - # Here: "-" - # This has to be adapted if you change either parameter - # when launching the nginx-ingress-controller. - - "ingress-controller-leader-nginx" - verbs: - - get - - update - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get - - create - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: nginx-ingress-role-nisa-binding - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress-role -subjects: - - kind: ServiceAccount - name: nginx-ingress-serviceaccount - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: nginx-ingress-clusterrole-nisa-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress-clusterrole -subjects: - - kind: ServiceAccount - name: nginx-ingress-serviceaccount - namespace: default diff --git a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml b/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml index aa7173ce025..8fea69d3987 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml @@ -1,4 +1,132 @@ apiVersion: v1 +kind: ServiceAccount +metadata: + name: nginx-ingress-serviceaccount + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRole +metadata: + name: nginx-ingress-clusterrole +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + verbs: + - list + - watch + - apiGroups: + - 
"" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - "extensions" + resources: + - ingresses/status + verbs: + - update +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: Role +metadata: + name: nginx-ingress-role + namespace: default +rules: + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + resourceNames: + # Defaults to "-" + # Here: "-" + # This has to be adapted if you change either parameter + # when launching the nginx-ingress-controller. + - "ingress-controller-leader-nginx" + verbs: + - get + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - apiGroups: + - "" + resources: + - endpoints + verbs: + - get + - create + - update +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: RoleBinding +metadata: + name: nginx-ingress-role-nisa-binding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nginx-ingress-role +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: nginx-ingress-clusterrole-nisa-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: nginx-ingress-clusterrole +subjects: + - kind: ServiceAccount + name: nginx-ingress-serviceaccount + namespace: default +--- +apiVersion: v1 kind: ConfigMap metadata: name: nginx-load-balancer-conf diff --git a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-rbac.yml b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-rbac.yml new file mode 100644 index 
00000000000..e69de29bb2d From b8a4fa6d81ba78a8562f4cfb795d6fdd8b2ca3fe Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Fri, 11 Aug 2017 12:51:38 +0300 Subject: [PATCH 07/10] Move ingress to kube-system. Rename enable-rbac to authorization-mode. --- .../juju/layers/kubernetes-master/config.yaml | 10 ++++++---- .../reactive/kubernetes_master.py | 18 +++++++++++++----- .../templates/default-http-backend.yaml | 2 ++ .../ingress-replication-controller.yaml | 11 ++++++----- 4 files changed, 27 insertions(+), 14 deletions(-) diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index aca3f31ea0a..fd6e65e62d1 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -40,7 +40,9 @@ options: runtime-config=batch/v2alpha1=true profiling=true will result in kube-apiserver being run with the following options: --runtime-config=batch/v2alpha1=true --profiling=true - enable-rbac: - type: boolean - default: True - description: Enable RBAC authorization mode. \ No newline at end of file + authorization-mode: + type: string + default: "RBAC" + description: | + Set the cluster's authorization mode. Allowed values are + "RBAC" and "None". 
\ No newline at end of file diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index df78ee20d11..8c61bb9d724 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -59,6 +59,7 @@ nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') +valid_auth_modes = ['rbac', 'none'] def service_cidr(): ''' Return the charm's service-cidr config ''' @@ -321,6 +322,11 @@ def idle_status(kube_api, kube_control): msg = 'WARN: cannot change service-cidr, still using ' + service_cidr() hookenv.status_set('active', msg) else: + mode = hookenv.config().get('authorization-mode').lower() + if mode not in valid_auth_modes: + hookenv.status_set('blocked', 'Incorrect authorization mode.') + return + # All services should be up and running at this point. Double-check... 
failing_services = master_services_down() if len(failing_services) == 0: @@ -656,11 +662,13 @@ def initial_nrpe_config(nagios=None): update_nrpe_config(nagios) -@when('config.changed.enable-rbac', +@when('config.changed.authorization-mode', 'kubernetes-master.components.started') -def enable_rbac_config(): +def switch_auth_mode(): config = hookenv.config() - if data_changed('rbac-flag', str(config.get('enable-rbac'))): + mode = config.get('authorization-mode').lower() + if mode in valid_auth_modes and \ + data_changed('auth-mode', mode): remove_state('kubernetes-master.components.started') @@ -1015,7 +1023,7 @@ def configure_apiserver(): 'DefaultTolerationSeconds' ] - if hookenv.config('enable-rbac'): + if hookenv.config('authorization-mode').lower() == 'rbac': admission_control.append('NodeRestriction') api_opts.add('authorization-mode', 'Node,RBAC', strict=True) else: @@ -1178,4 +1186,4 @@ def touch(fname): try: os.utime(fname, None) except OSError: - open(fname, 'a').close() \ No newline at end of file + open(fname, 'a').close() diff --git a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml index 739ae2758a2..2e337b18eb9 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml @@ -2,6 +2,7 @@ apiVersion: extensions/v1beta1 kind: Deployment metadata: name: default-http-backend + namespace: kube-system labels: k8s-app: default-http-backend spec: @@ -39,6 +40,7 @@ apiVersion: v1 kind: Service metadata: name: default-http-backend + namespace: kube-system labels: k8s-app: default-http-backend spec: diff --git a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml b/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml index 8fea69d3987..83640918332 100644 --- 
a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: ServiceAccount metadata: name: nginx-ingress-serviceaccount - namespace: default + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole @@ -60,7 +60,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: nginx-ingress-role - namespace: default + namespace: kube-system rules: - apiGroups: - "" @@ -103,7 +103,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: RoleBinding metadata: name: nginx-ingress-role-nisa-binding - namespace: default + namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -111,7 +111,7 @@ roleRef: subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount - namespace: default + namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding @@ -124,7 +124,7 @@ roleRef: subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount - namespace: default + namespace: kube-system --- apiVersion: v1 kind: ConfigMap @@ -135,6 +135,7 @@ apiVersion: v1 kind: ReplicationController metadata: name: nginx-ingress-controller + namespace: kube-system labels: k8s-app: nginx-ingress-lb spec: From 50354896b6201bed02c67b20834ef975d15f6b70 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 29 Aug 2017 11:10:57 +0300 Subject: [PATCH 08/10] Fix trimmed files coming from leadership --- .../juju/layers/kubernetes-master/reactive/kubernetes_master.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index 8c61bb9d724..ffd2d6d47da 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ 
b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -301,6 +301,7 @@ def get_keys_from_leader(keys, overwrite_local=False): # Write out the file and move on to the next item with open(k, 'w+') as fp: fp.write(contents) + fp.write('\n') return True From 95fec2dc3faa3d78c8a8ce9d80f42e1d38456c10 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Wed, 11 Oct 2017 17:13:19 +0300 Subject: [PATCH 09/10] Work on upgrade path --- .../juju/layers/kubernetes-master/config.yaml | 4 +- .../reactive/kubernetes_master.py | 57 ++++++++++++++++--- .../reactive/kubernetes_worker.py | 2 +- .../templates/default-http-backend.yaml | 25 +++----- .../ingress-replication-controller.yaml | 8 +-- .../nginx-ingress-controller-rbac.yml | 0 6 files changed, 63 insertions(+), 33 deletions(-) delete mode 100644 cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-rbac.yml diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index fd6e65e62d1..c6c09cf82a2 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -42,7 +42,7 @@ options: --runtime-config=batch/v2alpha1=true --profiling=true authorization-mode: type: string - default: "RBAC" + default: "None" description: | Set the cluster's authorization mode. Allowed values are - "RBAC" and "None". \ No newline at end of file + "RBAC" and "None". 
diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index ffd2d6d47da..ee1a6e372a9 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -26,6 +26,8 @@ import ipaddress import charms.leadership +from shutil import move + from shlex import split from subprocess import check_call from subprocess import check_output @@ -61,6 +63,7 @@ os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') valid_auth_modes = ['rbac', 'none'] + def service_cidr(): ''' Return the charm's service-cidr config ''' db = unitdata.kv() @@ -80,10 +83,41 @@ def reset_states_for_delivery(): '''An upgrade charm event was triggered by Juju, react to that here.''' migrate_from_pre_snaps() install_snaps() + add_rbac_roles() set_state('reconfigure.authentication.setup') remove_state('authentication.setup') +def add_rbac_roles(): + '''Update the known_tokens file with proper groups.''' + + tokens_fname = '/root/cdk/known_tokens.csv' + tokens_backup_fname = '/root/cdk/known_tokens.csv.backup' + move(tokens_fname, tokens_backup_fname) + with open(tokens_fname, 'w') as ftokens: + with open(tokens_backup_fname, 'r') as stream: + for line in stream: + record = line.strip().split(',') + # token, username, user, groups + if record[2] == 'admin' and len(record) == 3: + towrite = '{0},{1},{2},"{3}"\n'.format(record[0], + record[1], + record[2], + 'system:masters') + ftokens.write(towrite) + continue + if record[2] == 'kube_proxy': + towrite = '{0},{1},{2}\n'.format(record[0], + 'system:kube-proxy', + 'kube-proxy') + ftokens.write(towrite) + continue + if record[2] == 'kubelet' and record[1] == 'kubelet': + continue + + ftokens.write('{}'.format(line)) + + def rename_file_idempotent(source, destination): if os.path.isfile(source): os.rename(source, destination) @@ -404,38 +438,43 @@ def 
send_cluster_dns_detail(kube_control): kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) -@when('kube-control.auth.requested') +@when('kube-control.connected') @when('snap.installed.kubectl') @when('leadership.is_leader') def create_service_configs(kube_control): """Create the users for kubelet""" + should_restart = False # generate the username/pass for the requesting unit proxy_token = get_token('system:kube-proxy') if not proxy_token: setup_tokens(None, 'system:kube-proxy', 'kube-proxy') proxy_token = get_token('system:kube-proxy') + should_restart = True client_token = get_token('admin') if not client_token: setup_tokens(None, 'admin', 'admin', "system:masters") client_token = get_token('admin') + should_restart = True requests = kube_control.auth_user() for request in requests: username = request[1]['user'] group = request[1]['group'] kubelet_token = get_token(username) - if not kubelet_token: + if not kubelet_token and username and group: # Usernames have to be in the form of system:node: userid = "kubelet-{}".format(request[0].split('/')[1]) setup_tokens(None, username, userid, group) kubelet_token = get_token(username) + kube_control.sign_auth_request(request[0], username, + kubelet_token, proxy_token, + client_token) + should_restart = True - kube_control.sign_auth_request(request[0], username, - kubelet_token, proxy_token, client_token) - - host.service_restart('snap.kube-apiserver.daemon') - remove_state('authentication.setup') + if should_restart: + host.service_restart('snap.kube-apiserver.daemon') + remove_state('authentication.setup') @when('kube-control.departed') @@ -1113,7 +1152,9 @@ def setup_tokens(token, username, user, groups=None): with open(known_tokens, 'a') as stream: if groups: stream.write('{0},{1},{2},"{3}"\n'.format(token, - username, user, groups)) + username, + user, + groups)) else: stream.write('{0},{1},{2}\n'.format(token, username, user)) diff --git 
a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index d5d5a6d3b63..ff537a8828a 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -822,7 +822,7 @@ def request_kubelet_and_proxy_credentials(kube_control): kube_control.set_auth_request(nodeuser) -@when('kube-control.auth.available') +@when('kube-control.connected') def catch_change_in_creds(kube_control): """Request a service restart in case credential updates were detected.""" nodeuser = 'system:node:{}'.format(gethostname()) diff --git a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml index 2e337b18eb9..6c826ac320e 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/default-http-backend.yaml @@ -1,16 +1,15 @@ -apiVersion: extensions/v1beta1 -kind: Deployment +apiVersion: v1 +kind: ReplicationController metadata: name: default-http-backend - namespace: kube-system - labels: - k8s-app: default-http-backend spec: replicas: 1 + selector: + app: default-http-backend template: metadata: labels: - k8s-app: default-http-backend + app: default-http-backend spec: terminationGracePeriodSeconds: 60 containers: @@ -28,24 +27,18 @@ spec: timeoutSeconds: 5 ports: - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi --- apiVersion: v1 kind: Service metadata: name: default-http-backend - namespace: kube-system +# namespace: kube-system labels: k8s-app: default-http-backend spec: ports: - port: 80 - targetPort: 8080 + protocol: TCP + targetPort: 80 selector: - k8s-app: default-http-backend + app: default-http-backend diff --git 
a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml b/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml index 83640918332..933b1e2c00b 100644 --- a/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml +++ b/cluster/juju/layers/kubernetes-worker/templates/ingress-replication-controller.yaml @@ -2,7 +2,6 @@ apiVersion: v1 kind: ServiceAccount metadata: name: nginx-ingress-serviceaccount - namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole @@ -60,7 +59,6 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: Role metadata: name: nginx-ingress-role - namespace: kube-system rules: - apiGroups: - "" @@ -103,7 +101,6 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: RoleBinding metadata: name: nginx-ingress-role-nisa-binding - namespace: kube-system roleRef: apiGroup: rbac.authorization.k8s.io kind: Role @@ -111,7 +108,6 @@ roleRef: subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount - namespace: kube-system --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding @@ -124,7 +120,7 @@ roleRef: subjects: - kind: ServiceAccount name: nginx-ingress-serviceaccount - namespace: kube-system + namespace: default --- apiVersion: v1 kind: ConfigMap @@ -135,7 +131,6 @@ apiVersion: v1 kind: ReplicationController metadata: name: nginx-ingress-controller - namespace: kube-system labels: k8s-app: nginx-ingress-lb spec: @@ -152,6 +147,7 @@ spec: # hostPort doesn't work with CNI, so we have to use hostNetwork instead # see https://github.com/kubernetes/kubernetes/issues/23920 hostNetwork: true + serviceAccountName: nginx-ingress-serviceaccount containers: - image: {{ ingress_image }} name: nginx-ingress-lb diff --git a/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-rbac.yml b/cluster/juju/layers/kubernetes-worker/templates/nginx-ingress-controller-rbac.yml deleted file mode 100644 index 
e69de29bb2d..00000000000 From 9a28e9b125d076465779e011b160419754b50e96 Mon Sep 17 00:00:00 2001 From: Konstantinos Tsakalozos Date: Tue, 17 Oct 2017 22:31:22 +0300 Subject: [PATCH 10/10] Addressing review comments --- .../kubernetes-e2e/reactive/kubernetes_e2e.py | 20 +++++++--------- .../juju/layers/kubernetes-master/config.yaml | 6 ++--- .../reactive/kubernetes_master.py | 24 +++++++------------ .../reactive/kubernetes_worker.py | 6 ++--- 4 files changed, 22 insertions(+), 34 deletions(-) diff --git a/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py b/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py index b982dc65ec3..292038e14b9 100644 --- a/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py +++ b/cluster/juju/layers/kubernetes-e2e/reactive/kubernetes_e2e.py @@ -24,7 +24,7 @@ from charms.reactive import when from charms.reactive import when_not from charms.reactive.helpers import data_changed -from charmhelpers.core import hookenv, unitdata +from charmhelpers.core import hookenv from shlex import split @@ -32,7 +32,7 @@ from subprocess import check_call from subprocess import check_output -db = unitdata.kv() +USER = 'system:e2e' @hook('upgrade-charm') @@ -91,15 +91,16 @@ def install_snaps(): @when('tls_client.ca.saved', 'tls_client.client.certificate.saved', 'tls_client.client.key.saved', 'kubernetes-master.available', - 'kubernetes-e2e.installed', 'e2e.auth.bootstrapped') + 'kubernetes-e2e.installed', 'e2e.auth.bootstrapped', + 'kube-control.auth.available') @when_not('kubeconfig.ready') -def prepare_kubeconfig_certificates(master): +def prepare_kubeconfig_certificates(master, kube_control): ''' Prepare the data to feed to create the kubeconfig file. ''' layer_options = layer.options('tls-client') # Get all the paths to the tls information required for kubeconfig. 
ca = layer_options.get('ca_certificate_path') - creds = db.get('credentials') + creds = kube_control.get_auth_credentials(USER) data_changed('kube-control.creds', creds) servers = get_kube_api_servers(master) @@ -124,19 +125,16 @@ def request_credentials(kube_control): """ Request authorization creds.""" # Ask for a user, although we will be using the 'client_token' - user = 'system:e2e' - kube_control.set_auth_request(user) + kube_control.set_auth_request(USER) @when('kube-control.auth.available') def catch_change_in_creds(kube_control): """Request a service restart in case credential updates were detected.""" - user = 'system:e2e' - creds = kube_control.get_auth_credentials(user) + creds = kube_control.get_auth_credentials(USER) if creds \ and data_changed('kube-control.creds', creds) \ - and creds['user'] == user: - db.set('credentials', creds) + and creds['user'] == USER: set_state('e2e.auth.bootstrapped') diff --git a/cluster/juju/layers/kubernetes-master/config.yaml b/cluster/juju/layers/kubernetes-master/config.yaml index c6c09cf82a2..61fbfab257c 100644 --- a/cluster/juju/layers/kubernetes-master/config.yaml +++ b/cluster/juju/layers/kubernetes-master/config.yaml @@ -42,7 +42,7 @@ options: --runtime-config=batch/v2alpha1=true --profiling=true authorization-mode: type: string - default: "None" + default: "AlwaysAllow" description: | - Set the cluster's authorization mode. Allowed values are - "RBAC" and "None". + Comma separated authorization modes. Allowed values are + "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow". 
diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py index ee1a6e372a9..de10c4ae22d 100644 --- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py +++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py @@ -61,8 +61,6 @@ nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') -valid_auth_modes = ['rbac', 'none'] - def service_cidr(): ''' Return the charm's service-cidr config ''' @@ -357,11 +355,6 @@ def idle_status(kube_api, kube_control): msg = 'WARN: cannot change service-cidr, still using ' + service_cidr() hookenv.status_set('active', msg) else: - mode = hookenv.config().get('authorization-mode').lower() - if mode not in valid_auth_modes: - hookenv.status_set('blocked', 'Incorrect authorization mode.') - return - # All services should be up and running at this point. Double-check... failing_services = master_services_down() if len(failing_services) == 0: @@ -463,7 +456,7 @@ def create_service_configs(kube_control): group = request[1]['group'] kubelet_token = get_token(username) if not kubelet_token and username and group: - # Usernames have to be in the form of system:node: + # Usernames have to be in the form of system:node: userid = "kubelet-{}".format(request[0].split('/')[1]) setup_tokens(None, username, userid, group) kubelet_token = get_token(username) @@ -500,7 +493,7 @@ def flush_auth_for_departed(kube_control): with open(token_auth_file, 'w') as fp: fp.writelines(known_tokens) # Trigger rebroadcast of auth files for followers - remove_state('autentication.setup') + remove_state('authentication.setup') @when_not('kube-control.connected') @@ -706,9 +699,8 @@ def initial_nrpe_config(nagios=None): 'kubernetes-master.components.started') def switch_auth_mode(): config = hookenv.config() - mode = config.get('authorization-mode').lower() - if mode in valid_auth_modes 
and \ - data_changed('auth-mode', mode): + mode = config.get('authorization-mode') + if data_changed('auth-mode', mode): remove_state('kubernetes-master.components.started') @@ -1063,11 +1055,11 @@ def configure_apiserver(): 'DefaultTolerationSeconds' ] - if hookenv.config('authorization-mode').lower() == 'rbac': + auth_mode = hookenv.config('authorization-mode') + if 'Node' in auth_mode: admission_control.append('NodeRestriction') - api_opts.add('authorization-mode', 'Node,RBAC', strict=True) - else: - api_opts.add('authorization-mode', 'AlwaysAllow', strict=True) + + api_opts.add('authorization-mode', auth_mode, strict=True) if get_version('kube-apiserver') < (1, 6): hookenv.log('Removing DefaultTolerationSeconds from admission-control') diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py index ff537a8828a..d05468944dd 100644 --- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py +++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py @@ -52,8 +52,6 @@ kubeclientconfig_path = '/root/.kube/config' os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') -db = unitdata.kv() - @hook('upgrade-charm') def upgrade_charm(): @@ -338,7 +336,8 @@ def start_worker(kube_api, kube_control, auth_control, cni): hookenv.log('Waiting for cluster cidr.') return - creds = db.get('credentials') + nodeuser = 'system:node:{}'.format(gethostname()) + creds = kube_control.get_auth_credentials(nodeuser) data_changed('kube-control.creds', creds) # set --allow-privileged flag for kubelet @@ -830,7 +829,6 @@ def catch_change_in_creds(kube_control): if creds \ and data_changed('kube-control.creds', creds) \ and creds['user'] == nodeuser: - db.set('credentials', creds) set_state('worker.auth.bootstrapped') set_state('kubernetes-worker.restart-needed')