updated gems and versions
@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico Global BGP Configuration
 kind: CustomResourceDefinition
 metadata:
   name: globalbgpconfigs.crd.projectcalico.org

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico Global Felix Configuration
 kind: CustomResourceDefinition
 metadata:
   name: globalfelixconfigs.crd.projectcalico.org

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico Global Network Policies
 kind: CustomResourceDefinition
 metadata:
   name: globalnetworkpolicies.crd.projectcalico.org

@@ -1,5 +1,4 @@
 apiVersion: apiextensions.k8s.io/v1beta1
-description: Calico IP Pools
 kind: CustomResourceDefinition
 metadata:
   name: ippools.crd.projectcalico.org
@@ -1,6 +1,12 @@
approvers:
- floreks
- maciaszczykm
- bryk
reviewers:
- cheld
- cupofcat
- danielromlein
- floreks
- ianlewis
- konryd
- maciaszczykm
- mhenc
- rf232
@@ -29,6 +29,6 @@ all: transform
 %.sed: %.base
 	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@
 
-transform: kube-dns.yaml.in kube-dns.yaml.sed coredns.yaml.in coredns.yaml.sed
+transform: coredns.yaml.in coredns.yaml.sed
 
 .PHONY: transform
@@ -66,6 +66,7 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        reload
     }
 ---
 apiVersion: extensions/v1beta1
@@ -105,7 +106,7 @@ spec:
           operator: "Exists"
       containers:
       - name: coredns
-        image: coredns/coredns:1.0.6
+        image: k8s.gcr.io/coredns:1.1.3
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -117,6 +118,7 @@ spec:
         volumeMounts:
         - name: config-volume
           mountPath: /etc/coredns
+          readOnly: true
         ports:
         - containerPort: 53
           name: dns
@@ -124,6 +126,9 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
         livenessProbe:
           httpGet:
             path: /health
@@ -133,6 +138,14 @@ spec:
           timeoutSeconds: 5
           successThreshold: 1
           failureThreshold: 5
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
       dnsPolicy: Default
       volumes:
         - name: config-volume
@@ -147,6 +160,8 @@ kind: Service
 metadata:
   name: kube-dns
   namespace: kube-system
+  annotations:
+    prometheus.io/scrape: "true"
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@@ -66,6 +66,7 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        reload
     }
 ---
 apiVersion: extensions/v1beta1
@@ -105,7 +106,7 @@ spec:
           operator: "Exists"
      containers:
       - name: coredns
-        image: coredns/coredns:1.0.6
+        image: k8s.gcr.io/coredns:1.1.3
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -117,6 +118,7 @@ spec:
         volumeMounts:
         - name: config-volume
           mountPath: /etc/coredns
+          readOnly: true
         ports:
         - containerPort: 53
           name: dns
@@ -124,6 +126,9 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
         livenessProbe:
           httpGet:
             path: /health
@@ -133,6 +138,14 @@ spec:
           timeoutSeconds: 5
           successThreshold: 1
           failureThreshold: 5
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
       dnsPolicy: Default
       volumes:
         - name: config-volume
@@ -147,6 +160,8 @@ kind: Service
 metadata:
   name: kube-dns
   namespace: kube-system
+  annotations:
+    prometheus.io/scrape: "true"
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
@@ -66,6 +66,7 @@ data:
         prometheus :9153
         proxy . /etc/resolv.conf
         cache 30
+        reload
     }
 ---
 apiVersion: extensions/v1beta1
@@ -105,7 +106,7 @@ spec:
           operator: "Exists"
       containers:
       - name: coredns
-        image: coredns/coredns:1.0.6
+        image: k8s.gcr.io/coredns:1.1.3
         imagePullPolicy: IfNotPresent
         resources:
           limits:
@@ -117,6 +118,7 @@ spec:
         volumeMounts:
         - name: config-volume
           mountPath: /etc/coredns
+          readOnly: true
         ports:
         - containerPort: 53
           name: dns
@@ -124,6 +126,9 @@ spec:
         - containerPort: 53
           name: dns-tcp
           protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
         livenessProbe:
           httpGet:
             path: /health
@@ -133,6 +138,14 @@ spec:
           timeoutSeconds: 5
           successThreshold: 1
           failureThreshold: 5
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
       dnsPolicy: Default
       volumes:
         - name: config-volume
@@ -147,6 +160,8 @@ kind: Service
 metadata:
   name: kube-dns
   namespace: kube-system
+  annotations:
+    prometheus.io/scrape: "true"
   labels:
     k8s-app: kube-dns
     kubernetes.io/cluster-service: "true"
cluster/addons/dns/kube-dns/Makefile (new file, 34 lines)
@@ -0,0 +1,34 @@
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Makefile for the kubedns underscore templates to Salt/Pillar and other formats.
+
+# If you update the *.base templates, please run this Makefile before pushing.
+#
+# Usage:
+#    make
+
+all: transform
+
+# .base -> .in pattern rule
+%.in: %.base
+	sed -f transforms2salt.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@
+
+# .base -> .sed pattern rule
+%.sed: %.base
+	sed -f transforms2sed.sed $< | sed s/__SOURCE_FILENAME__/$</g > $@
+
+transform: kube-dns.yaml.in kube-dns.yaml.sed
+
+.PHONY: transform
@@ -9,7 +9,7 @@ can use the DNS Service’s IP to resolve DNS names.
 ## Manually scale kube-dns Deployment
 
 kube-dns creates only one DNS Pod by default. If
-[dns-horizontal-autoscaler](../dns-horizontal-autoscaler/)
+[dns-horizontal-autoscaler](../../dns-horizontal-autoscaler/)
 is not enabled, you may need to manually scale kube-dns Deployment.
 
 Please use below `kubectl scale` command to scale:
@@ -18,9 +18,9 @@ kubectl --namespace=kube-system scale deployment kube-dns --replicas=<NUM_YOU_WANT>
 ```
 
 Do not use `kubectl edit` to modify kube-dns Deployment object if it is
-controlled by [Addon Manager](../addon-manager/). Otherwise the modifications
+controlled by [Addon Manager](../../addon-manager/). Otherwise the modifications
 will be clobbered, in addition the replicas count for kube-dns Deployment will
-be reset to 1. See [Cluster add-ons README](../README.md) and
+be reset to 1. See [Cluster add-ons README](../../README.md) and
 [#36411](https://github.com/kubernetes/kubernetes/issues/36411) for reference.
 
 ## kube-dns addon templates
cluster/addons/dns/kube-dns/transforms2salt.sed (new file, 4 lines)
@@ -0,0 +1,4 @@
+s/__PILLAR__DNS__SERVER__/{{ pillar['dns_server'] }}/g
+s/__PILLAR__DNS__DOMAIN__/{{ pillar['dns_domain'] }}/g
+s/__PILLAR__CLUSTER_CIDR__/{{ pillar['service_cluster_ip_range'] }}/g
+s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
cluster/addons/dns/kube-dns/transforms2sed.sed (new file, 4 lines)
@@ -0,0 +1,4 @@
+s/__PILLAR__DNS__SERVER__/$DNS_SERVER_IP/g
+s/__PILLAR__DNS__DOMAIN__/$DNS_DOMAIN/g
+s/__PILLAR__CLUSTER_CIDR__/$SERVICE_CLUSTER_IP_RANGE/g
+s/__MACHINE_GENERATED_WARNING__/Warning: This is a file generated from the base underscore template file: __SOURCE_FILENAME__/g
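These substitution rules are what the Makefile pattern rules above pass to sed; a minimal sketch of one expansion, assuming it runs in the kube-dns addon directory with the file names used in the Makefile:

```sh
# .base -> .sed: swap the __PILLAR__*__ placeholders for $-style shell
# variables, then stamp the machine-generated warning with the source name.
sed -f transforms2sed.sed kube-dns.yaml.base \
  | sed s/__SOURCE_FILENAME__/kube-dns.yaml.base/g > kube-dns.yaml.sed
```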
@@ -19,15 +19,16 @@ a Deployment, but allows for maintaining state on storage volumes.
 
 ### Security
 
-Elasticsearch has capabilities to enable authorization using the
-[X-Pack plugin][xPack]. See configuration parameter `xpack.security.enabled`
-in Elasticsearch and Kibana configurations. It can also be set via the
-`XPACK_SECURITY_ENABLED` env variable. After enabling the feature,
-follow [official documentation][setupCreds] to set up credentials in
-Elasticsearch and Kibana. Don't forget to propagate those credentials also to
-Fluentd in its [configuration][fluentdCreds], using for example
-[environment variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap]
-and [Secrets][secret] to store credentials in the Kubernetes apiserver.
+Elasticsearch has capabilities to enable authorization using the [X-Pack
+plugin][xPack]. For the sake of simplicity this example uses the fully open
+source prebuilt images from Elastic that do not contain the X-Pack plugin. If
+you need these features, please consider building the images from either the
+"basic" or "platinum" version. After enabling these features, follow [official
+documentation][setupCreds] to set up credentials in Elasticsearch and Kibana.
+Don't forget to propagate those credentials also to Fluentd in its
+[configuration][fluentdCreds], using for example [environment
+variables][fluentdEnvVar]. You can utilize [ConfigMaps][configMap] and
+[Secrets][secret] to store credentials in the Kubernetes apiserver.
 
 ### Initialization
 
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-FROM docker.elastic.co/elasticsearch/elasticsearch:5.6.4
+FROM docker.elastic.co/elasticsearch/elasticsearch-oss:6.2.4
 
 VOLUME ["/data"]
 EXPOSE 9200 9300
@@ -16,7 +16,7 @@
 
 PREFIX = staging-k8s.gcr.io
 IMAGE = elasticsearch
-TAG = v5.6.4
+TAG = v6.2.4
 
 build:
 	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
@@ -12,6 +12,3 @@ path.data: /data
 network.host: 0.0.0.0
 
 discovery.zen.minimum_master_nodes: ${MINIMUM_MASTER_NODES}
-
-xpack.security.enabled: false
-xpack.monitoring.enabled: false
@@ -26,4 +26,4 @@ export MINIMUM_MASTER_NODES=${MINIMUM_MASTER_NODES:-2}
 chown -R elasticsearch:elasticsearch /data
 
 ./bin/elasticsearch_logging_discovery >> ./config/elasticsearch.yml
-exec su elasticsearch -c ./bin/es-docker
+exec su elasticsearch -c /usr/local/bin/docker-entrypoint.sh
@@ -54,7 +54,7 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
-    version: v5.6.4
+    version: v6.2.4
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
@@ -63,17 +63,17 @@ spec:
   selector:
     matchLabels:
       k8s-app: elasticsearch-logging
-      version: v5.6.4
+      version: v6.2.4
   template:
     metadata:
       labels:
         k8s-app: elasticsearch-logging
-        version: v5.6.4
+        version: v6.2.4
         kubernetes.io/cluster-service: "true"
     spec:
       serviceAccountName: elasticsearch-logging
       containers:
-      - image: k8s.gcr.io/elasticsearch:v5.6.4
+      - image: k8s.gcr.io/elasticsearch:v6.2.4
         name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class
@@ -48,35 +48,36 @@ roleRef:
 apiVersion: apps/v1
 kind: DaemonSet
 metadata:
-  name: fluentd-es-v2.1.0
+  name: fluentd-es-v2.2.0
   namespace: kube-system
   labels:
     k8s-app: fluentd-es
-    version: v2.1.0
+    version: v2.2.0
     kubernetes.io/cluster-service: "true"
     addonmanager.kubernetes.io/mode: Reconcile
 spec:
   selector:
     matchLabels:
       k8s-app: fluentd-es
-      version: v2.1.0
+      version: v2.2.0
   template:
     metadata:
       labels:
         k8s-app: fluentd-es
         kubernetes.io/cluster-service: "true"
-        version: v2.1.0
+        version: v2.2.0
       # This annotation ensures that fluentd does not get evicted if the node
       # supports critical pod annotation based priority scheme.
      # Note that this does not guarantee admission on the nodes (#40573).
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
+        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       priorityClassName: system-node-critical
       serviceAccountName: fluentd-es
       containers:
       - name: fluentd-es
-        image: k8s.gcr.io/fluentd-elasticsearch:v2.1.0
+        image: k8s.gcr.io/fluentd-elasticsearch:v2.2.0
         env:
         - name: FLUENTD_ARGS
           value: --no-supervisor -q
@@ -1,11 +1,11 @@
 source 'https://rubygems.org'
 
-gem 'fluentd', '<=1.2.1'
+gem 'fluentd', '<=1.2.2'
 gem 'activesupport', '~>5.2.0'
 gem 'fluent-plugin-kubernetes_metadata_filter', '~>2.1.2'
-gem 'fluent-plugin-elasticsearch', '~>2.10.1'
-gem 'fluent-plugin-systemd', '~>1.0.0'
+gem 'fluent-plugin-elasticsearch', '~>2.10.5'
+gem 'fluent-plugin-systemd', '~>1.0.1'
 gem 'fluent-plugin-detect-exceptions', '~>0.0.11'
 gem 'fluent-plugin-prometheus', '~>1.0.1'
 gem 'fluent-plugin-multi-format-parser', '~>1.0.0'
-gem 'oj', '~>3.6.0'
+gem 'oj', '~>3.6.2'
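These constraint bumps are normally paired with a regenerated Gemfile.lock; a plausible way to reproduce that step locally (standard Bundler usage, not part of this diff):

```sh
# Re-resolve only the bumped gems against the new constraints
bundle update fluentd fluent-plugin-elasticsearch fluent-plugin-systemd oj
```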
@@ -16,7 +16,7 @@
 
 PREFIX = staging-k8s.gcr.io
 IMAGE = fluentd-elasticsearch
-TAG = v2.1.0
+TAG = v2.2.0
 
 build:
 	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
@@ -16,10 +16,12 @@ spec:
     metadata:
       labels:
         k8s-app: kibana-logging
+      annotations:
+        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
     spec:
       containers:
       - name: kibana-logging
-        image: docker.elastic.co/kibana/kibana:5.6.4
+        image: docker.elastic.co/kibana/kibana-oss:6.2.4
         resources:
           # need more cpu upon initialization, therefore burstable class
           limits:
@@ -31,10 +33,6 @@ spec:
           value: http://elasticsearch-logging:9200
         - name: SERVER_BASEPATH
           value: /api/v1/namespaces/kube-system/services/kibana-logging/proxy
-        - name: XPACK_MONITORING_ENABLED
-          value: "false"
-        - name: XPACK_SECURITY_ENABLED
-          value: "false"
         ports:
         - containerPort: 5601
           name: ui
(File diff suppressed because it is too large)
@@ -0,0 +1,68 @@
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cluster-autoscaler
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+  # leader election
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    verbs: ["create"]
+  - apiGroups: [""]
+    resources: ["endpoints"]
+    resourceNames: ["cluster-autoscaler"]
+    verbs: ["get", "update", "patch", "delete"]
+  # accessing & modifying cluster state (nodes & pods)
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["get", "list", "watch", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: [""]
+    resources: ["pods/eviction"]
+    verbs: ["create"]
+  # read-only access to cluster state
+  - apiGroups: [""]
+    resources: ["services", "replicationcontrollers", "persistentvolumes", "persistentvolumeclaims"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apps", "extensions"]
+    resources: ["daemonsets", "replicasets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["apps"]
+    resources: ["statefulsets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["policy"]
+    resources: ["poddisruptionbudgets"]
+    verbs: ["get", "list", "watch"]
+  - apiGroups: ["storage.k8s.io"]
+    resources: ["storageclasses"]
+    verbs: ["get", "list", "watch"]
+  # misc access
+  - apiGroups: [""]
+    resources: ["events"]
+    verbs: ["create", "update", "patch"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["create"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    resourceNames: ["cluster-autoscaler-status"]
+    verbs: ["get", "update", "patch", "delete"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: cluster-autoscaler
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+subjects:
+  - kind: User
+    name: cluster-autoscaler
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: cluster-autoscaler
+  apiGroup: rbac.authorization.k8s.io
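Once the addon manager applies this ClusterRole and ClusterRoleBinding, the grants can be spot-checked via impersonation (illustrative commands; assumes a kubectl context allowed to impersonate users):

```sh
kubectl auth can-i watch nodes --as=cluster-autoscaler        # expect: yes
kubectl auth can-i create configmaps --as=cluster-autoscaler  # expect: yes
kubectl auth can-i delete pods --as=cluster-autoscaler        # expect: no (only eviction creation is granted)
```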
@@ -26,7 +26,7 @@ export KUBE_CONFIG_FILE=${KUBE_CONFIG_FILE:-${KUBE_ROOT}/cluster/centos/config-default.sh}
 
 function deploy_dns {
   echo "Deploying DNS on Kubernetes"
-  cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns.yaml.sed" kube-dns.yaml
+  cp "${KUBE_ROOT}/cluster/addons/dns/kube-dns/kube-dns.yaml.sed" kube-dns.yaml
   sed -i -e "s/\\\$DNS_DOMAIN/${DNS_DOMAIN}/g" kube-dns.yaml
   sed -i -e "s/\\\$DNS_SERVER_IP/${DNS_SERVER_IP}/g" kube-dns.yaml
 
@@ -180,6 +180,16 @@ if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
|
||||
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
|
||||
fi
|
||||
|
||||
# Optional: Enable netd.
|
||||
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
|
||||
CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
|
||||
|
||||
# To avoid running netd on a node that is not configured appropriately,
|
||||
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
|
||||
if [[ ${ENABLE_NETD:-} == "true" ]]; then
|
||||
NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
|
||||
fi
|
||||
|
||||
# Enable metadata concealment by firewalling pod traffic to the metadata server
|
||||
# and run a proxy daemonset on nodes.
|
||||
#
|
||||
|
||||
@@ -191,11 +191,11 @@ TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:---min-resync-period=3m}"
 TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
 
 KUBELET_TEST_ARGS="${KUBELET_TEST_ARGS:-} --serialize-image-pulls=false ${TEST_CLUSTER_API_CONTENT_TYPE}"
-if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
-  NODE_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
-fi
-if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
-  MASTER_KUBELET_TEST_ARGS=" --experimental-kernel-memcg-notification=true"
+if [[ "${NODE_OS_DISTRIBUTION}" == "gci" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "ubuntu" ]] || [[ "${NODE_OS_DISTRIBUTION}" == "custom" ]]; then
+  NODE_KUBELET_TEST_ARGS="${NODE_KUBELET_TEST_ARGS:-} --experimental-kernel-memcg-notification=true"
+fi
+if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || [[ "${MASTER_OS_DISTRIBUTION}" == "ubuntu" ]]; then
+  MASTER_KUBELET_TEST_ARGS="${MASTER_KUBELET_TEST_ARGS:-} --experimental-kernel-memcg-notification=true"
 fi
 APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-} --vmodule=httplog=3 --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1,settings.k8s.io/v1alpha1 ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT}"
 CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE}"
@@ -212,6 +212,16 @@ NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
 # NON_MASTER_NODE_LABELS are labels will only be applied on non-master nodes.
 NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
 
+# Optional: Enable netd.
+ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
+CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
+
+# To avoid running netd on a node that is not configured appropriately,
+# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
+if [[ ${ENABLE_NETD:-} == "true" ]]; then
+  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}beta.kubernetes.io/kube-netd-ready=true"
+fi
+
 # To avoid running Calico on a node that is not configured appropriately,
 # label each Node so that the DaemonSet can run the Pods only on ready Nodes.
 if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
@@ -545,6 +545,9 @@ function create-master-auth {
   if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
     append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
   fi
+  if [[ -n "${KUBE_CLUSTER_AUTOSCALER_TOKEN:-}" ]]; then
+    append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_CLUSTER_AUTOSCALER_TOKEN}," "cluster-autoscaler,uid:cluster-autoscaler"
+  fi
   if [[ -n "${KUBE_PROXY_TOKEN:-}" ]]; then
     append_or_replace_prefixed_line "${known_tokens_csv}" "${KUBE_PROXY_TOKEN}," "system:kube-proxy,uid:kube_proxy"
   fi
@@ -1006,6 +1009,30 @@ current-context: kube-scheduler
 EOF
 }
 
+function create-clusterautoscaler-kubeconfig {
+  echo "Creating cluster-autoscaler kubeconfig file"
+  mkdir -p /etc/srv/kubernetes/cluster-autoscaler
+  cat <<EOF >/etc/srv/kubernetes/cluster-autoscaler/kubeconfig
+apiVersion: v1
+kind: Config
+users:
+- name: cluster-autoscaler
+  user:
+    token: ${KUBE_CLUSTER_AUTOSCALER_TOKEN}
+clusters:
+- name: local
+  cluster:
+    insecure-skip-tls-verify: true
+    server: https://localhost:443
+contexts:
+- context:
+    cluster: local
+    user: cluster-autoscaler
+  name: cluster-autoscaler
+current-context: cluster-autoscaler
+EOF
+}
+
 function create-kubescheduler-policy-config {
   echo "Creating kube-scheduler policy config file"
   mkdir -p /etc/srv/kubernetes/kube-scheduler
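On a master where this function has run, the resulting kubeconfig can be sanity-checked directly (illustrative; assumes kubectl is on the PATH and the RBAC manifest above is installed):

```sh
KUBECONFIG=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig kubectl get nodes
```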
@@ -1207,7 +1234,15 @@ function prepare-kube-proxy-manifest-variables {
     params+=" --feature-gates=${FEATURE_GATES}"
   fi
   if [[ "${KUBE_PROXY_MODE:-}" == "ipvs" ]];then
-    params+=" --proxy-mode=ipvs --feature-gates=SupportIPVSProxyMode=true"
+    sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4
+    if [[ $? -eq 0 ]];
+    then
+      params+=" --proxy-mode=ipvs"
+    else
+      # If IPVS modules are not present, make sure the node does not come up as
+      # healthy.
+      exit 1
+    fi
   fi
   params+=" --iptables-sync-period=1m --iptables-min-sync-period=10s --ipvs-sync-period=1m --ipvs-min-sync-period=10s"
   if [[ -n "${KUBEPROXY_TEST_ARGS:-}" ]]; then
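The modprobe gate above makes IPVS mode fail fast when the kernel modules are unavailable; the same check can be run by hand on a node (standard Linux tooling, shown for illustration):

```sh
sudo modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack_ipv4 && echo "IPVS modules OK"
lsmod | grep -E 'ip_vs|nf_conntrack'
```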
@@ -1970,12 +2005,15 @@ function start-kube-scheduler {
 function start-cluster-autoscaler {
   if [[ "${ENABLE_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
     echo "Start kubernetes cluster autoscaler"
+    setup-addon-manifests "addons" "rbac/cluster-autoscaler"
+    create-clusterautoscaler-kubeconfig
     prepare-log-file /var/log/cluster-autoscaler.log
 
     # Remove salt comments and replace variables with values
     local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/cluster-autoscaler.manifest"
 
     local params="${AUTOSCALER_MIG_CONFIG} ${CLOUD_CONFIG_OPT} ${AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
+    params+=" --kubeconfig=/etc/srv/kubernetes/cluster-autoscaler/kubeconfig"
     sed -i -e "s@{{params}}@${params}@g" "${src_file}"
     sed -i -e "s@{{cloud_config_mount}}@${CLOUD_CONFIG_MOUNT}@g" "${src_file}"
     sed -i -e "s@{{cloud_config_volume}}@${CLOUD_CONFIG_VOLUME}@g" "${src_file}"
@@ -2175,8 +2213,8 @@ function update-dashboard-controller {
 
 # Sets up the manifests of coreDNS for k8s addons.
 function setup-coredns-manifest {
-  local -r coredns_file="${dst_dir}/dns/coredns.yaml"
-  mv "${dst_dir}/dns/coredns.yaml.in" "${coredns_file}"
+  local -r coredns_file="${dst_dir}/dns/coredns/coredns.yaml"
+  mv "${dst_dir}/dns/coredns/coredns.yaml.in" "${coredns_file}"
   # Replace the salt configurations with variable values.
   sed -i -e "s@{{ *pillar\['dns_domain'\] *}}@${DNS_DOMAIN}@g" "${coredns_file}"
   sed -i -e "s@{{ *pillar\['dns_server'\] *}}@${DNS_SERVER_IP}@g" "${coredns_file}"
@@ -2215,8 +2253,8 @@ function setup-fluentd {
 
 # Sets up the manifests of kube-dns for k8s addons.
 function setup-kube-dns-manifest {
-  local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml"
-  mv "${dst_dir}/dns/kube-dns.yaml.in" "${kubedns_file}"
+  local -r kubedns_file="${dst_dir}/dns/kube-dns/kube-dns.yaml"
+  mv "${dst_dir}/dns/kube-dns/kube-dns.yaml.in" "${kubedns_file}"
   if [ -n "${CUSTOM_KUBE_DNS_YAML:-}" ]; then
     # Replace with custom GKE kube-dns deployment.
     cat > "${kubedns_file}" <<EOF
@@ -2235,6 +2273,19 @@ EOF
   fi
 }
 
+# Sets up the manifests of netd for k8s addons.
+function setup-netd-manifest {
+  local -r netd_file="${dst_dir}/netd/netd.yaml"
+  mkdir -p "${dst_dir}/netd"
+  touch "${netd_file}"
+  if [ -n "${CUSTOM_NETD_YAML:-}" ]; then
+    # Replace with custom GCP netd deployment.
+    cat > "${netd_file}" <<EOF
+$(echo "$CUSTOM_NETD_YAML")
+EOF
+  fi
+}
+
 # Prepares the manifests of k8s addons, and starts the addon manager.
 # Vars assumed:
 #   CLUSTER_NAME
@@ -2341,13 +2392,17 @@ EOF
     setup-addon-manifests "addons" "device-plugins/nvidia-gpu"
   fi
   if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
-    setup-addon-manifests "addons" "dns"
     if [[ "${CLUSTER_DNS_CORE_DNS:-}" == "true" ]]; then
+      setup-addon-manifests "addons" "dns/coredns"
       setup-coredns-manifest
     else
+      setup-addon-manifests "addons" "dns/kube-dns"
       setup-kube-dns-manifest
     fi
   fi
+  if [[ "${ENABLE_NETD:-}" == "true" ]]; then
+    setup-netd-manifest
+  fi
   if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
      [[ "${LOGGING_DESTINATION:-}" == "elasticsearch" ]] && \
      [[ "${ENABLE_CLUSTER_LOGGING:-}" == "true" ]]; then
@@ -2570,9 +2625,10 @@ function main() {
     fi
   fi
 
-  # generate the controller manager and scheduler tokens here since they are only used on the master.
+  # generate the controller manager, scheduler and cluster autoscaler tokens here since they are only used on the master.
   KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
   KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+  KUBE_CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
 
   setup-os-params
   config-ip-firewall
@@ -264,6 +264,18 @@ runtime-endpoint: ${CONTAINER_RUNTIME_ENDPOINT:-unix:///var/run/dockershim.sock}
 EOF
 }
 
+function install-exec-auth-plugin {
+  if [[ ! "${EXEC_AUTH_PLUGIN_URL:-}" ]]; then
+    return
+  fi
+  local -r plugin_url="${EXEC_AUTH_PLUGIN_URL}"
+  local -r plugin_sha1="${EXEC_AUTH_PLUGIN_SHA1}"
+
+  echo "Downloading gke-exec-auth-plugin binary"
+  download-or-bust "${plugin_sha1}" "${plugin_url}"
+  mv "${KUBE_HOME}/gke-exec-auth-plugin" "${KUBE_BIN}"
+}
+
 function install-kube-manifests {
   # Put kube-system pods manifests in ${KUBE_HOME}/kube-manifests/.
   local dst_dir="${KUBE_HOME}/kube-manifests"
@@ -403,6 +415,10 @@ function install-kube-binary-config {
   # Install crictl on each node.
   install-crictl
 
+  if [[ "${KUBERNETES_MASTER:-}" == "false" ]]; then
+    install-exec-auth-plugin
+  fi
+
   # Clean up.
   rm -rf "${KUBE_HOME}/kubernetes"
   rm -f "${KUBE_HOME}/${server_binary_tar}"
@@ -9,6 +9,7 @@ pkg_tar(
     mode = "0644",
 )
 
+# if you update this, also update function kube::release::package_kube_manifests_tarball() in build/lib/release.sh
 filegroup(
     name = "manifests",
     srcs = [
@@ -7,6 +7,9 @@
     "labels": {
       "tier": "cluster-management",
       "component": "cluster-autoscaler"
     },
+    "annotations": {
+      "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
+    }
   },
   "spec": {
@@ -14,7 +17,7 @@
     "containers": [
       {
         "name": "cluster-autoscaler",
-        "image": "k8s.gcr.io/cluster-autoscaler:v1.2.2",
+        "image": "k8s.gcr.io/cluster-autoscaler:v1.3.0",
         "livenessProbe": {
           "httpGet": {
             "path": "/health-check",
@@ -25,7 +28,7 @@
         },
         "command": [
           "./run.sh",
-          "--kubernetes=http://127.0.0.1:8080?inClusterConfig=f",
+          "--kubernetes=https://127.0.0.1:443",
           "--v=4",
           "--logtostderr=true",
           "--write-status-configmap=true",
@@ -56,6 +59,11 @@
             "readOnly": true,
             "mountPath": "/usr/share/ca-certificates"
           },
+          {
+            "name": "srvkube",
+            "readOnly": true,
+            "mountPath": "/etc/srv/kubernetes/cluster-autoscaler"
+          },
           {
             "name": "logfile",
             "mountPath": "/var/log/cluster-autoscaler.log",
@@ -80,6 +88,12 @@
             "path": "/usr/share/ca-certificates"
           }
         },
+        {
+          "name": "srvkube",
+          "hostPath": {
+            "path": "/etc/srv/kubernetes/cluster-autoscaler"
+          }
+        },
         {
           "name": "logfile",
           "hostPath": {
@@ -5,6 +5,7 @@ metadata:
   namespace: kube-system
   annotations:
     scheduler.alpha.kubernetes.io/critical-pod: ''
+    seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
   labels:
     k8s-app: etcd-empty-dir-cleanup
 spec:
@@ -5,7 +5,8 @@
 "name":"etcd-server{{ suffix }}",
 "namespace": "kube-system",
 "annotations": {
-  "scheduler.alpha.kubernetes.io/critical-pod": ""
+  "scheduler.alpha.kubernetes.io/critical-pod": "",
+  "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
 }
 },
 "spec":{
@@ -62,7 +63,7 @@
 "ports": [
   { "name": "serverport",
     "containerPort": {{ server_port }},
-    "hostPort": {{ server_port }}
+    "hostPort": {{ server_port }}
   },
   { "name": "clientport",
     "containerPort": {{ port }},
@@ -5,6 +5,7 @@ metadata:
   namespace: kube-system
   annotations:
     scheduler.alpha.kubernetes.io/critical-pod: ''
+    seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
   labels:
     k8s-app: gcp-lb-controller
     version: v1.1.1
@@ -5,6 +5,7 @@ metadata:
   namespace: kube-system
   annotations:
     scheduler.alpha.kubernetes.io/critical-pod: ''
+    seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
   labels:
     component: kube-addon-manager
 spec:
@@ -5,7 +5,8 @@
 "name":"kube-apiserver",
 "namespace": "kube-system",
 "annotations": {
-  "scheduler.alpha.kubernetes.io/critical-pod": ""
+  "scheduler.alpha.kubernetes.io/critical-pod": "",
+  "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
 },
 "labels": {
   "tier": "control-plane",
@@ -5,7 +5,8 @@
 "name":"kube-controller-manager",
 "namespace": "kube-system",
 "annotations": {
-  "scheduler.alpha.kubernetes.io/critical-pod": ""
+  "scheduler.alpha.kubernetes.io/critical-pod": "",
+  "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
 },
 "labels": {
   "tier": "control-plane",
@@ -5,7 +5,8 @@
 "name":"kube-scheduler",
 "namespace": "kube-system",
 "annotations": {
-  "scheduler.alpha.kubernetes.io/critical-pod": ""
+  "scheduler.alpha.kubernetes.io/critical-pod": "",
+  "seccomp.security.alpha.kubernetes.io/pod": "docker/default"
 },
 "labels": {
   "tier": "control-plane",
@@ -642,7 +642,7 @@ function construct-kubelet-flags {
   # Network plugin
   if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
     flags+=" --cni-bin-dir=/home/kubernetes/bin"
-    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
+    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" || "${ENABLE_NETD:-}" == "true" ]]; then
       # Calico uses CNI always.
       # Note that network policy won't work for master node.
       if [[ "${master}" == "true" ]]; then
@@ -653,15 +653,13 @@ function construct-kubelet-flags {
    else
      # Otherwise use the configured value.
      flags+=" --network-plugin=${NETWORK_PROVIDER}"

    fi
  fi
  if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
    flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
  fi
  flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
  if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
    flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
  fi
  local node_labels=$(build-node-labels ${master})
  if [[ -n "${node_labels:-}" ]]; then
    flags+=" --node-labels=${node_labels}"
@@ -899,6 +897,9 @@ REGION: $(yaml-quote ${REGION})
 VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR})
 KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS})
 REQUIRE_METADATA_KUBELET_CONFIG_FILE: $(yaml-quote true)
+ENABLE_NETD: $(yaml-quote ${ENABLE_NETD:-false})
+CUSTOM_NETD_YAML: |
+$(echo "${CUSTOM_NETD_YAML:-}" | sed -e "s/'/''/g")
 EOF
 if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] || \
    [[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]] || \
@@ -294,7 +294,7 @@ function dump_nodes_with_logexporter() {
   local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64 | tr -d '\n')"
   local -r cloud_provider="${KUBERNETES_PROVIDER}"
   local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
-  local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 5 ))"
+  local -r logexport_sleep_seconds="$(( 90 + NUM_NODES / 3 ))"
 
   # Fill in the parameters in the logexporter daemonset template.
   sed -i'' -e "s@{{.LogexporterNamespace}}@${logexporter_namespace}@g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
@@ -345,6 +345,7 @@ function dump_nodes_with_logexporter() {
   done
 
   # Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH.
+  "${KUBECTL}" get pods --namespace "${logexporter_namespace}" || true
   "${KUBECTL}" delete namespace "${logexporter_namespace}" || true
   if [[ "${#failed_nodes[@]}" != 0 ]]; then
     echo -e "Dumping logs through SSH for the following nodes:\n${failed_nodes[@]}"