Move things into a 'kube-system' namespace.

Brendan Burns authored on 2015-06-24 21:55:40 -07:00; committed by Satnam Singh
commit 988aa6fdf6 (parent c8f8e5f333)
33 changed files with 100 additions and 56 deletions

View File

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: elasticsearch-logging-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     version: v1
@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.4
+      - image: gcr.io/google_containers/elasticsearch:1.5
         name: elasticsearch-logging
         resources:
           limits:

View File

@@ -1,7 +1,8 @@
 .PHONY: elasticsearch_logging_discovery build push
-# Keep this one version ahead to help prevent accidental pushes.
-TAG = 1.4
+# The current value of the tag to be used for building and
+# pushing an image to gcr.io
+TAG = 1.5
 build: elasticsearch_logging_discovery
 	docker build -t gcr.io/google_containers/elasticsearch:$(TAG) .

View File

@@ -50,7 +50,7 @@ func main() {
 	// Look for endpoints associated with the Elasticsearch loggging service.
 	// First wait for the service to become available.
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		elasticsearch, err = c.Services(api.NamespaceDefault).Get("elasticsearch-logging")
+		elasticsearch, err = c.Services(api.NamespaceSystem).Get("elasticsearch-logging")
 		if err == nil {
 			break
 		}
@@ -67,7 +67,7 @@ func main() {
 	// Wait for some endpoints.
 	count := 0
 	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
-		endpoints, err = c.Endpoints(api.NamespaceDefault).Get("elasticsearch-logging")
+		endpoints, err = c.Endpoints(api.NamespaceSystem).Get("elasticsearch-logging")
 		if err != nil {
 			continue
 		}

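The two hunks above change only the namespace argument: the discovery binary still polls the API every 10 seconds, for up to 5 minutes, until the elasticsearch-logging service (and later its endpoints) appears, now in kube-system rather than default. Below is a minimal, self-contained sketch of that retry pattern; lookupService is a hypothetical stand-in for the real client call c.Services(api.NamespaceSystem).Get(...) and is not part of this commit.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// lookupService is a hypothetical stand-in for the real client call
// c.Services(api.NamespaceSystem).Get("elasticsearch-logging").
func lookupService(namespace, name string) (string, error) {
	return "", errors.New("service not available yet")
}

func main() {
	var addr string
	var err error
	// Same shape as the loops above: retry every 10 seconds, give up
	// once 5 minutes have elapsed since the first attempt.
	for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
		addr, err = lookupService("kube-system", "elasticsearch-logging")
		if err == nil {
			break
		}
	}
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("found service:", addr)
}
```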
View File

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: elasticsearch-logging
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: elasticsearch-logging
     kubernetes.io/cluster-service: "true"

View File

@@ -19,7 +19,7 @@
 # The time_format specification below makes sure we properly
 # parse the time format produced by Docker. This will be
 # submitted to Elasticsearch and should appear like:
-# $ curl 'http://elasticsearch-logging.default:9200/_search?pretty'
+# $ curl 'http://elasticsearch-logging:9200/_search?pretty'
 # ...
 # {
 #   "_index" : "logstash-2014.09.25",
@@ -94,6 +94,21 @@
   tag docker
 </source>
+<match kubernetes.**>
+  type elasticsearch
+  log_level info
+  include_tag_key true
+  host elasticsearch-logging
+  port 9200
+  logstash_format true
+  flush_interval 5s
+  # Never wait longer than 5 minutes between retries.
+  max_retry_wait 300
+  # Disable the limit on the number of retries (retry forever).
+  disable_retry_limit
+</match>
+>>>>>>> Move things into a 'kube-system' namespace.
 <source>
   type tail
   format none

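The added <match kubernetes.**> block forwards container logs to the elasticsearch-logging service on port 9200 in logstash format, which is exactly what the updated curl comment above queries. A rough Go equivalent of that query (not from the commit itself) is sketched below; the short host name assumes resolution from inside the kube-system namespace, while a pod in another namespace would conventionally use elasticsearch-logging.kube-system.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Same request as the curl example in the comment above; the host name
	// assumes the query runs from inside the kube-system namespace.
	resp, err := http.Get("http://elasticsearch-logging:9200/_search?pretty")
	if err != nil {
		log.Fatalf("querying Elasticsearch: %v", err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response: %v", err)
	}
	fmt.Println(string(body))
}
```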
View File

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: kibana-logging-v1
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kibana-logging
     version: v1

View File

@@ -2,7 +2,7 @@ apiVersion: v1
 kind: Service
 metadata:
   name: kibana-logging
-  namespace: default
+  namespace: kube-system
   labels:
     k8s-app: kibana-logging
     kubernetes.io/cluster-service: "true"