kubernetes/cluster/ubuntu/skydns-rc.yaml

apiVersion: v1beta3
kind: ReplicationController
metadata:
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
  name: kube-dns
  namespace: default
spec:
  replicas: 1
  selector:
    k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
      name: kube-dns
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd:2.0.9
        command:
        - /usr/local/bin/etcd
        - --addr
        - 127.0.0.1:4001
        - --bind-addr
        - 127.0.0.1:4001
        - -initial-cluster-token=skydns-etcd
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.4
        args:
        # entrypoint = "/kube2sky"
        - -domain=kubernetes.local
        - -kubecfg_file=/etc/dns_token/kubeconfig
        volumeMounts:
        - mountPath: /etc/dns_token
          name: dns-token
          readOnly: true
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-03-11-001
        args:
        # entrypoint = "/skydns"
        - -machines=http://localhost:4001
        - -addr=0.0.0.0:53
        - -domain=kubernetes.local.
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        livenessProbe:
          exec:
            command:
            - "/bin/sh"
            - "-c"
            # The health check succeeds by virtue of not hanging. It'd be nice
            # to also check local services are known, but if that's broken then
            # etcd or kube2sky has to be restarted, not skydns.
            - "nslookup foobar 127.0.0.1 &> /dev/null; echo ok"
          initialDelaySeconds: 30
          timeoutSeconds: 5
      dnsPolicy: Default  # Don't use cluster DNS.
      volumes:
      - name: dns-token
        secret:
          secretName: token-system-dns
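
For reference, the three containers in this pod cooperate: kube2sky watches the API server (authenticating with the kubeconfig mounted from the token-system-dns secret) and writes DNS records for cluster services into the local etcd, while SkyDNS reads those records back from etcd (-machines=http://localhost:4001) and serves the kubernetes.local domain on UDP port 53. Assuming kubectl is already configured against a cluster of this vintage, the replication controller would typically be created with:

kubectl create -f kubernetes/cluster/ubuntu/skydns-rc.yaml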