
Kubernetes initiates "graceful shutdown" by sending SIGTERM to PID 1. The way the existing startup scripts worked, this signal arrived at the shell wrapper, not Elasticsearch, and the shell wrapper exited, killing the container immediately.

Before this change:

```
  1 ?        Ss     0:00 /bin/sh -c /run.sh
  6 ?        S      0:00 /bin/bash /run.sh
 13 ?        S      0:00  \_ /bin/su -c /elasticsearch/bin/elasticsearch elasticsearch
 14 ?        Ss     0:00      \_ sh -c /elasticsearch/bin/elasticsearch
 15 ?        Sl    19:18          \_ /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java ... org.elasticsearch.bootstrap.Elasticsearch start
```

After this change:

```
  1 ?        Ssl    0:29 /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java ... org.elasticsearch.bootstrap.Elasticsearch start
```
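In other words, the fix collapses the wrapper chain so the JVM itself ends up as PID 1: invoke the entrypoint in exec form (avoiding an implicit `/bin/sh -c` wrapper) and have the startup script `exec` Elasticsearch rather than fork it. A minimal sketch of such a wrapper, assuming the paths from the listing above (the actual run.sh in the image may differ):

```sh
#!/bin/sh
# run.sh -- signal-friendly wrapper (sketch). `exec` replaces this shell
# with the java process instead of forking a child, so Elasticsearch
# becomes PID 1 and receives the kubelet's SIGTERM directly.
# If privileges must be dropped, a re-exec'ing helper such as gosu
# preserves the single-process tree (assumption: gosu is installed):
#   exec gosu elasticsearch /elasticsearch/bin/elasticsearch
exec /elasticsearch/bin/elasticsearch
```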
```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: elasticsearch-logging-v1
  namespace: kube-system
  labels:
    k8s-app: elasticsearch-logging
    version: v1
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  replicas: 2
  selector:
    k8s-app: elasticsearch-logging
    version: v1
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - image: gcr.io/google_containers/elasticsearch:v2.4.1-2
        name: elasticsearch-logging
        resources:
          # need more cpu upon initialization, therefore burstable class
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - name: es-persistent-storage
          mountPath: /data
        env:
        - name: "NAMESPACE"
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
      volumes:
      - name: es-persistent-storage
        emptyDir: {}
```
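With the controller applied, the new process tree can be verified from outside the container. The manifest filename and pod name below are illustrative; substitute a pod name reported by `kubectl get pods`:

```sh
# Apply the manifest, then inspect the process table of a running pod.
kubectl --namespace=kube-system apply -f es-controller.yaml
kubectl --namespace=kube-system exec elasticsearch-logging-v1-abcde -- ps ax
# PID 1 should now be the java process, not /bin/sh or /run.sh.
```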