Merge pull request #53820 from juju-solutions/feature/rbac

Automatic merge from submit-queue (batch tested with PRs 53820, 53971). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add RBAC support to Kubernetes via Juju

**What this PR does / why we need it**: This PR adds RBAC support to the Juju deployment of Kubernetes.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: 

**Special notes for your reviewer**:

**Release note**:

```
Canonical Distribution of Kubernetes offers configurable RBAC.
```
Authored by Kubernetes Submit Queue on 2017-10-24 09:32:15 -07:00, committed via GitHub.
11 changed files with 315 additions and 44 deletions.
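For context on the release note above, this is roughly how an operator would turn the new behaviour on once the change ships. It is a usage sketch, not part of the PR: the application name `kubernetes-master` is the conventional CDK name and is assumed here, and the mode combination is just an example.

```sh
# Hypothetical usage sketch (not part of this PR): enable RBAC plus Node
# authorization on a Canonical Distribution of Kubernetes deployment.
# Assumes the master charm is deployed under the name "kubernetes-master";
# the option added by this PR defaults to "AlwaysAllow".
juju config kubernetes-master authorization-mode="RBAC,Node"
```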

```diff
@@ -22,6 +22,7 @@ from charms.reactive import is_state
 from charms.reactive import set_state
 from charms.reactive import when
 from charms.reactive import when_not
+from charms.reactive.helpers import data_changed
 from charmhelpers.core import hookenv
@@ -31,6 +32,9 @@ from subprocess import check_call
 from subprocess import check_output
 
+USER = 'system:e2e'
+
+
 @hook('upgrade-charm')
 def reset_delivery_states():
     ''' Remove the state set when resources are unpacked. '''
@@ -87,7 +91,8 @@ def install_snaps():
 @when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
       'tls_client.client.key.saved', 'kubernetes-master.available',
-      'kubernetes-e2e.installed', 'kube-control.auth.available')
+      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped',
+      'kube-control.auth.available')
 @when_not('kubeconfig.ready')
 def prepare_kubeconfig_certificates(master, kube_control):
     ''' Prepare the data to feed to create the kubeconfig file. '''
@@ -95,7 +100,8 @@ def prepare_kubeconfig_certificates(master, kube_control):
     layer_options = layer.options('tls-client')
     # Get all the paths to the tls information required for kubeconfig.
     ca = layer_options.get('ca_certificate_path')
-    creds = kube_control.get_auth_credentials()
+    creds = kube_control.get_auth_credentials(USER)
+    data_changed('kube-control.creds', creds)
     servers = get_kube_api_servers(master)
@@ -118,11 +124,18 @@ def prepare_kubeconfig_certificates(master, kube_control):
 def request_credentials(kube_control):
     """ Request authorization creds."""
-    # The kube-cotrol interface is created to support RBAC.
-    # At this point we might as well do the right thing and return the hostname
-    # even if it will only be used when we enable RBAC
-    user = 'system:masters'
-    kube_control.set_auth_request(user)
+    # Ask for a user, although we will be using the 'client_token'
+    kube_control.set_auth_request(USER)
+
+
+@when('kube-control.auth.available')
+def catch_change_in_creds(kube_control):
+    """Request a service restart in case credential updates were detected."""
+    creds = kube_control.get_auth_credentials(USER)
+    if creds \
+            and data_changed('kube-control.creds', creds) \
+            and creds['user'] == USER:
+        set_state('e2e.auth.bootstrapped')
 
 
 @when('kubernetes-e2e.installed', 'kubeconfig.ready')
```

```diff
@@ -54,6 +54,10 @@ The domain name to use for the Kubernetes cluster for DNS.
 Enables the installation of Kubernetes dashboard, Heapster, Grafana, and
 InfluxDB.
 
+#### enable-rbac
+
+Enable RBAC and Node authorisation.
+
 # DNS for the cluster
 The DNS add-on allows the pods to have a DNS names in addition to IP addresses.
```

```diff
@@ -46,3 +46,9 @@ options:
       runtime-config=batch/v2alpha1=true profiling=true
       will result in kube-apiserver being run with the following options:
       --runtime-config=batch/v2alpha1=true --profiling=true
+  authorization-mode:
+    type: string
+    default: "AlwaysAllow"
+    description: |
+      Comma separated authorization modes. Allowed values are
+      "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow".
```

```diff
@@ -26,6 +26,8 @@ import ipaddress
 
 import charms.leadership
 
+from shutil import move
+
 from shlex import split
 from subprocess import check_call
 from subprocess import check_output
@@ -79,10 +81,41 @@ def reset_states_for_delivery():
     '''An upgrade charm event was triggered by Juju, react to that here.'''
     migrate_from_pre_snaps()
     install_snaps()
+    add_rbac_roles()
     set_state('reconfigure.authentication.setup')
     remove_state('authentication.setup')
 
 
+def add_rbac_roles():
+    '''Update the known_tokens file with proper groups.'''
+    tokens_fname = '/root/cdk/known_tokens.csv'
+    tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
+    move(tokens_fname, tokens_backup_fname)
+    with open(tokens_fname, 'w') as ftokens:
+        with open(tokens_backup_fname, 'r') as stream:
+            for line in stream:
+                record = line.strip().split(',')
+                # token, username, user, groups
+                if record[2] == 'admin' and len(record) == 3:
+                    towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
+                                                           record[1],
+                                                           record[2],
+                                                           'system:masters')
+                    ftokens.write(towrite)
+                    continue
+                if record[2] == 'kube_proxy':
+                    towrite = '{0},{1},{2}\n'.format(record[0],
+                                                     'system:kube-proxy',
+                                                     'kube-proxy')
+                    ftokens.write(towrite)
+                    continue
+                if record[2] == 'kubelet' and record[1] == 'kubelet':
+                    continue
+                ftokens.write('{}'.format(line))
+
+
 def rename_file_idempotent(source, destination):
     if os.path.isfile(source):
         os.rename(source, destination)
@@ -209,12 +242,10 @@ def setup_leader_authentication():
     if not get_keys_from_leader(keys) \
             or is_state('reconfigure.authentication.setup'):
         last_pass = get_password('basic_auth.csv', 'admin')
-        setup_basic_auth(last_pass, 'admin', 'admin')
+        setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')
         if not os.path.isfile(known_tokens):
-            setup_tokens(None, 'admin', 'admin')
-            setup_tokens(None, 'kubelet', 'kubelet')
-            setup_tokens(None, 'kube_proxy', 'kube_proxy')
+            touch(known_tokens)
     # Generate the default service account token key
     os.makedirs('/root/cdk', exist_ok=True)
@@ -302,6 +333,7 @@ def get_keys_from_leader(keys, overwrite_local=False):
             # Write out the file and move on to the next item
             with open(k, 'w+') as fp:
                 fp.write(contents)
+                fp.write('\n')
 
     return True
@@ -399,20 +431,69 @@ def send_cluster_dns_detail(kube_control):
     kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
 
 
-@when('kube-control.auth.requested')
-@when('authentication.setup')
+@when('kube-control.connected')
+@when('snap.installed.kubectl')
 @when('leadership.is_leader')
-def send_tokens(kube_control):
-    """Send the tokens to the workers."""
-    kubelet_token = get_token('kubelet')
-    proxy_token = get_token('kube_proxy')
-    admin_token = get_token('admin')
+def create_service_configs(kube_control):
+    """Create the users for kubelet"""
+    should_restart = False
+    # generate the username/pass for the requesting unit
+    proxy_token = get_token('system:kube-proxy')
+    if not proxy_token:
+        setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
+        proxy_token = get_token('system:kube-proxy')
+        should_restart = True
+
+    client_token = get_token('admin')
+    if not client_token:
+        setup_tokens(None, 'admin', 'admin', "system:masters")
+        client_token = get_token('admin')
+        should_restart = True
 
+    # Send the data
     requests = kube_control.auth_user()
     for request in requests:
-        kube_control.sign_auth_request(request[0], kubelet_token,
-                                       proxy_token, admin_token)
+        username = request[1]['user']
+        group = request[1]['group']
+        kubelet_token = get_token(username)
+        if not kubelet_token and username and group:
+            # Usernames have to be in the form of system:node:<nodeName>
+            userid = "kubelet-{}".format(request[0].split('/')[1])
+            setup_tokens(None, username, userid, group)
+            kubelet_token = get_token(username)
+
+        kube_control.sign_auth_request(request[0], username,
+                                       kubelet_token, proxy_token,
+                                       client_token)
+        should_restart = True
+
+    if should_restart:
+        host.service_restart('snap.kube-apiserver.daemon')
+        remove_state('authentication.setup')
+
+
+@when('kube-control.departed')
+@when('leadership.is_leader')
+def flush_auth_for_departed(kube_control):
+    ''' Unit has left the cluster and needs to have its authentication
+    tokens removed from the token registry '''
+    token_auth_file = '/root/cdk/known_tokens.csv'
+    departing_unit = kube_control.flush_departed()
+    userid = "kubelet-{}".format(departing_unit.split('/')[1])
+    known_tokens = open(token_auth_file, 'r').readlines()
+    for line in known_tokens[:]:
+        haystack = line.split(',')
+        # skip the entry if we dont have token,user,id,groups format
+        if len(haystack) < 4:
+            continue
+        if haystack[2] == userid:
+            hookenv.log('Found unit {} in token auth. Removing auth'
+                        ' token.'.format(userid))
+            known_tokens.remove(line)
+    # atomically rewrite the file minus any scrubbed units
+    hookenv.log('Rewriting token auth file: {}'.format(token_auth_file))
+    with open(token_auth_file, 'w') as fp:
+        fp.writelines(known_tokens)
+    # Trigger rebroadcast of auth files for followers
+    remove_state('authentication.setup')
 
 
 @when_not('kube-control.connected')
@@ -640,6 +721,15 @@ def initial_nrpe_config(nagios=None):
     update_nrpe_config(nagios)
 
 
+@when('config.changed.authorization-mode',
+      'kubernetes-master.components.started')
+def switch_auth_mode():
+    config = hookenv.config()
+    mode = config.get('authorization-mode')
+    if data_changed('auth-mode', mode):
+        remove_state('kubernetes-master.components.started')
+
+
 @when('kubernetes-master.components.started')
 @when('nrpe-external-master.available')
 @when_any('config.changed.nagios_context',
@@ -991,6 +1081,12 @@ def configure_apiserver():
             'DefaultTolerationSeconds'
         ]
 
+    auth_mode = hookenv.config('authorization-mode')
+    if 'Node' in auth_mode:
+        admission_control.append('NodeRestriction')
+
+    api_opts.add('authorization-mode', auth_mode, strict=True)
+
     if get_version('kube-apiserver') < (1, 6):
         hookenv.log('Removing DefaultTolerationSeconds from admission-control')
         admission_control.remove('DefaultTolerationSeconds')
@@ -1046,7 +1142,8 @@ def configure_scheduler():
     set_state('kube-scheduler.do-restart')
 
 
-def setup_basic_auth(password=None, username='admin', uid='admin'):
+def setup_basic_auth(password=None, username='admin', uid='admin',
+                     groups=None):
     '''Create the htacces file and the tokens.'''
     root_cdk = '/root/cdk'
     if not os.path.isdir(root_cdk):
@@ -1055,10 +1152,14 @@ def setup_basic_auth(password=None, username='admin', uid='admin'):
     if not password:
         password = token_generator()
     with open(htaccess, 'w') as stream:
-        stream.write('{0},{1},{2}'.format(password, username, uid))
+        if groups:
+            stream.write('{0},{1},{2},"{3}"'.format(password,
+                                                    username, uid, groups))
+        else:
+            stream.write('{0},{1},{2}'.format(password, username, uid))
 
 
-def setup_tokens(token, username, user):
+def setup_tokens(token, username, user, groups=None):
     '''Create a token file for kubernetes authentication.'''
     root_cdk = '/root/cdk'
     if not os.path.isdir(root_cdk):
@@ -1067,7 +1168,13 @@ def setup_tokens(token, username, user):
     if not token:
         token = token_generator()
     with open(known_tokens, 'a') as stream:
-        stream.write('{0},{1},{2}\n'.format(token, username, user))
+        if groups:
+            stream.write('{0},{1},{2},"{3}"\n'.format(token,
+                                                      username,
+                                                      user,
+                                                      groups))
+        else:
+            stream.write('{0},{1},{2}\n'.format(token, username, user))
 
 
 def get_password(csv_fname, user):
@@ -1133,3 +1240,10 @@ def apiserverVersion():
     cmd = 'kube-apiserver --version'.split()
     version_string = check_output(cmd).decode('utf-8')
     return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
+
+
+def touch(fname):
+    try:
+        os.utime(fname, None)
+    except OSError:
+        open(fname, 'a').close()
```
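To make the token-file migration above concrete, here is a hedged way to inspect the rewritten registry on a master unit after an upgrade; the unit name is an assumption and the tokens themselves are secrets generated per deployment:

```sh
# Inspect the migrated token registry (unit name assumed).
# Expected shape after add_rbac_roles(): token,username,user[,"group"] per line.
# The admin row gains "system:masters", kube_proxy becomes system:kube-proxy,
# and the old shared kubelet row is dropped in favour of per-node users.
juju run --unit kubernetes-master/0 'cat /root/cdk/known_tokens.csv'
```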

```diff
@@ -34,7 +34,7 @@ if not context['replicas']:
     context['replicas'] = 3
 
 # Declare a kubectl template when invoking kubectl
-kubectl = ['kubectl', '--kubeconfig=/root/cdk/kubeconfig']
+kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']
 
 # Remove deployment if requested
 if context['delete']:
```

```diff
@@ -21,8 +21,8 @@ fi
 # Cordon and drain the unit
-kubectl --kubeconfig=/root/cdk/kubeconfig cordon $(hostname)
-kubectl --kubeconfig=/root/cdk/kubeconfig drain $(hostname) ${EXTRA_FLAGS}
+kubectl --kubeconfig=/root/.kube/config cordon $(hostname)
+kubectl --kubeconfig=/root/.kube/config drain $(hostname) ${EXTRA_FLAGS}
 
 # Set status to indicate the unit is paused and under maintenance.
 status-set 'waiting' 'Kubernetes unit paused'
```

```diff
@@ -57,7 +57,7 @@ if param_error:
     context['ingress'] = action_get('ingress')
 
 # Declare a kubectl template when invoking kubectl
-kubectl = ['kubectl', '--kubeconfig=/root/cdk/kubeconfig']
+kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']
 
 # Remove deployment if requested
 if deletion:
```

```diff
@@ -4,5 +4,5 @@ set -ex
 export PATH=$PATH:/snap/bin
 
-kubectl --kubeconfig=/root/cdk/kubeconfig uncordon $(hostname)
+kubectl --kubeconfig=/root/.kube/config uncordon $(hostname)
 
 status-set 'active' 'Kubernetes unit resumed'
```

```diff
@@ -47,11 +47,11 @@ from charmhelpers.contrib.charmsupport import nrpe
 nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
 
 kubeconfig_path = '/root/cdk/kubeconfig'
+kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
+kubeclientconfig_path = '/root/.kube/config'
 
 os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
-db = unitdata.kv()
 
 
 @hook('upgrade-charm')
 def upgrade_charm():
@@ -319,7 +319,8 @@ def watch_for_changes(kube_api, kube_control, cni):
       'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
       'tls_client.server.key.saved',
       'kube-control.dns.available', 'kube-control.auth.available',
-      'cni.available', 'kubernetes-worker.restart-needed')
+      'cni.available', 'kubernetes-worker.restart-needed',
+      'worker.auth.bootstrapped')
 def start_worker(kube_api, kube_control, auth_control, cni):
     ''' Start kubelet using the provided API and DNS info.'''
     servers = get_kube_api_servers(kube_api)
@@ -335,7 +336,8 @@ def start_worker(kube_api, kube_control, auth_control, cni):
         hookenv.log('Waiting for cluster cidr.')
         return
 
-    creds = kube_control.get_auth_credentials()
+    nodeuser = 'system:node:{}'.format(gethostname())
+    creds = kube_control.get_auth_credentials(nodeuser)
     data_changed('kube-control.creds', creds)
 
     # set --allow-privileged flag for kubelet
@@ -458,11 +460,13 @@ def create_config(server, creds):
     cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
     check_call(cmd)
     # Create kubernetes configuration in the default location for root.
-    create_kubeconfig('/root/.kube/config', server, ca,
+    create_kubeconfig(kubeclientconfig_path, server, ca,
                       token=creds['client_token'], user='root')
     # Create kubernetes configuration for kubelet, and kube-proxy services.
     create_kubeconfig(kubeconfig_path, server, ca,
                       token=creds['kubelet_token'], user='kubelet')
+    create_kubeconfig(kubeproxyconfig_path, server, ca,
+                      token=creds['proxy_token'], user='kube-proxy')
 
 
 def configure_worker_services(api_servers, dns, cluster_cidr):
@@ -491,7 +495,7 @@ def configure_worker_services(api_servers, dns, cluster_cidr):
     kube_proxy_opts = FlagManager('kube-proxy')
     kube_proxy_opts.add('cluster-cidr', cluster_cidr)
-    kube_proxy_opts.add('kubeconfig', kubeconfig_path)
+    kube_proxy_opts.add('kubeconfig', kubeproxyconfig_path)
     kube_proxy_opts.add('logtostderr', 'true')
     kube_proxy_opts.add('v', '0')
     kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
@@ -613,7 +617,7 @@ def get_kube_api_servers(kube_api):
 def kubectl(*args):
     ''' Run a kubectl cli command with a config file. Returns stdout and throws
     an error if the command fails. '''
-    command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args)
+    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
     hookenv.log('Executing {}'.format(command))
     return check_output(command)
@@ -817,11 +821,15 @@ def request_kubelet_and_proxy_credentials(kube_control):
     kube_control.set_auth_request(nodeuser)
 
 
-@when('kube-control.auth.available')
+@when('kube-control.connected')
 def catch_change_in_creds(kube_control):
     """Request a service restart in case credential updates were detected."""
-    creds = kube_control.get_auth_credentials()
-    if data_changed('kube-control.creds', creds):
+    nodeuser = 'system:node:{}'.format(gethostname())
+    creds = kube_control.get_auth_credentials(nodeuser)
+    if creds \
+            and data_changed('kube-control.creds', creds) \
+            and creds['user'] == nodeuser:
+        set_state('worker.auth.bootstrapped')
         set_state('kubernetes-worker.restart-needed')
```

```diff
@@ -32,12 +32,13 @@ apiVersion: v1
 kind: Service
 metadata:
   name: default-http-backend
+  # namespace: kube-system
   labels:
-    app: default-http-backend
+    k8s-app: default-http-backend
 spec:
   ports:
   - port: 80
     protocol: TCP
     targetPort: 80
   selector:
     app: default-http-backend
```

```diff
@@ -1,4 +1,128 @@
 apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: nginx-ingress-serviceaccount
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: nginx-ingress-clusterrole
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: nginx-ingress-role
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - pods
+      - secrets
+      - namespaces
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    resourceNames:
+      # Defaults to "<election-id>-<ingress-class>"
+      # Here: "<ingress-controller-leader>-<nginx>"
+      # This has to be adapted if you change either parameter
+      # when launching the nginx-ingress-controller.
+      - "ingress-controller-leader-nginx"
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+    verbs:
+      - get
+      - create
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: nginx-ingress-role-nisa-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: nginx-ingress-role
+subjects:
+  - kind: ServiceAccount
+    name: nginx-ingress-serviceaccount
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: nginx-ingress-clusterrole-nisa-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: nginx-ingress-clusterrole
+subjects:
+  - kind: ServiceAccount
+    name: nginx-ingress-serviceaccount
+    namespace: default
+---
+apiVersion: v1
 kind: ConfigMap
 metadata:
   name: nginx-load-balancer-conf
@@ -23,6 +147,7 @@ spec:
       # hostPort doesn't work with CNI, so we have to use hostNetwork instead
      # see https://github.com/kubernetes/kubernetes/issues/23920
       hostNetwork: true
+      serviceAccountName: nginx-ingress-serviceaccount
       containers:
         - image: {{ ingress_image }}
           name: nginx-ingress-lb
```
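Once the ingress manifests above are applied on a cluster that actually runs with `authorization-mode="RBAC,Node"`, the new bindings can be spot-checked from a worker unit. This is a hedged verification sketch; the expected answers are inferences from the ClusterRole rules above, not output captured for this PR:

```sh
# Impersonate the new service account; the admin credentials this PR places in
# the root kubeconfig belong to system:masters, so impersonation is permitted.
kubectl --kubeconfig=/root/.kube/config auth can-i list ingresses \
    --as=system:serviceaccount:default:nginx-ingress-serviceaccount   # expect: yes
kubectl --kubeconfig=/root/.kube/config auth can-i create pods \
    --as=system:serviceaccount:default:nginx-ingress-serviceaccount   # expect: no
```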