* elastic now provides a fully open-source version of their prebuilt docker images (elasticsearch, kibana and so on). To avoid running into licensing conflicts for this addon example, we should use these images instead of the premium ones (where we currently also have to disable the premium features manually)
* remove the disable flags for x-pack, since the *-oss images no longer include it (see the sketch below)
* bump the elasticsearch and kibana version from 5.6.4 to 6.2.4
* use the oss images from elastic as the base images for kibana and elasticsearch
* bump fluentd to ~>1.1.3
* bump gem 'fluent-plugin-elasticsearch' to '~>2.9.1' to allow usage of elasticsearch 6.x
* bump fluentd-es-image to v2.1.0
* fix the elasticsearch run.sh to align with the new elasticsearch upstream container structure
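As a rough illustration of the first two points, here is a minimal sketch of what the addon's Kibana container spec could look like once it runs on the *-oss image. The file name (kibana-deployment.yaml), the container name, the ELASTICSEARCH_URL value and the removed XPACK_* variable names are assumptions for illustration and are not taken from the manifest below.

```yaml
# Hypothetical excerpt from kibana-deployment.yaml after switching to the OSS image.
# Everything except the -oss image reference is an assumption for illustration.
containers:
- name: kibana-logging
  image: docker.elastic.co/kibana/kibana-oss:6.2.4
  env:
  - name: ELASTICSEARCH_URL
    value: http://elasticsearch-logging:9200
  # The former XPACK_MONITORING_ENABLED / XPACK_SECURITY_ENABLED "false"
  # entries are dropped here: the -oss image ships without X-Pack,
  # so there is nothing left to disable.
```

The Elasticsearch StatefulSet below needs no such flags either; it only switches to an image built on top of the OSS release.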
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "services"
  - "namespaces"
  - "endpoints"
  verbs:
  - "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: elasticsearch-logging
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: elasticsearch-logging
  namespace: kube-system
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: elasticsearch-logging
  apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch-logging
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    version: v6.2.4
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  serviceName: elasticsearch-logging
  replicas: 2
  selector:
    matchLabels:
      k8s-app: elasticsearch-logging
      version: v6.2.4
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v6.2.4
        kubernetes.io/cluster-service: "true"
    spec:
      serviceAccountName: elasticsearch-logging
      containers:
      - image: k8s.gcr.io/elasticsearch:v6.2.4
        name: elasticsearch-logging
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - name: elasticsearch-logging
          mountPath: /data
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: elasticsearch-logging
        emptyDir: {}
      # Elasticsearch requires vm.max_map_count to be at least 262144.
      # If your OS already sets up this number to a higher value, feel free
      # to remove this init container.
      initContainers:
      - image: alpine:3.6
        command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
        name: elasticsearch-logging-init
        securityContext:
          privileged: true