standard config opt names: logging

Tim Hockin 2014-11-13 23:07:43 -08:00
parent 52ad94d766
commit 47141f05c7
17 changed files with 60 additions and 34 deletions

View File

@@ -38,3 +38,7 @@ PORTAL_NET="10.0.0.0/16"
# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true
# Optional: Install node logging
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

View File

@@ -22,9 +22,9 @@ mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET
use-fluentd-es: $FLUENTD_ELASTICSEARCH
use-fluentd-gcp: $FLUENTD_GCP
enable_node_monitoring: $ENABLE_NODE_MONITORING
enable_node_logging: $ENABLE_NODE_LOGGING
logging_destination: $LOGGING_DESTINATION
EOF
mkdir -p /srv/salt-overlay/salt/nginx

View File

@@ -256,8 +256,8 @@ function kube-up {
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
echo "readonly FLUENTD_ELASTICSEARCH='${FLUENTD_ELASTICSEARCH:-false}'"
echo "readonly FLUENTD_GCP='false'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/salt-master.sh"

View File

@@ -42,13 +42,9 @@ ENABLE_NODE_MONITORING=true
# Optional: When set to true, heapster will be set up as part of the cluster bring up.
ENABLE_CLUSTER_MONITORING=true
# Turn on Elasticsearch logging unless Google Cloud Logging has been selected or
# if Elasticsearch logging has been specifically turned off.
if [[ "${FLUENTD_GCP-}" != "true" ]]; then
if [[ "${FLUENTD_ELASTICSEARCH-}" != "false" ]]; then
FLUENTD_ELASTICSEARCH="true"
fi
fi
# When set to true, Docker Cache is enabled by default as part of the cluster bring up.
ENABLE_DOCKER_REGISTRY_CACHE=true
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

View File

@@ -41,4 +41,7 @@ ENABLE_DOCKER_REGISTRY_CACHE=true
ENABLE_NODE_MONITORING=true
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp
ENABLE_CLUSTER_MONITORING=false

View File

@@ -22,9 +22,9 @@ mkdir -p /srv/salt-overlay/pillar
cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET
use-fluentd-es: $FLUENTD_ELASTICSEARCH
use-fluentd-gcp: $FLUENTD_GCP
enable_node_monitoring: $ENABLE_NODE_MONITORING
enable_node_logging: $ENABLE_NODE_LOGGING
logging_destination: $LOGGING_DESTINATION
EOF
mkdir -p /srv/salt-overlay/salt/nginx

View File

@@ -283,9 +283,9 @@ function kube-up {
echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo "readonly FLUENTD_ELASTICSEARCH='${FLUENTD_ELASTICSEARCH:-false}'"
echo "readonly FLUENTD_GCP='${FLUENTD_GCP:-false}'"
echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
@@ -293,17 +293,13 @@ function kube-up {
) > "${KUBE_TEMP}/master-start.sh"
# Report logging choice (if any).
if [[ "${FLUENTD_ELASTICSEARCH-}" == "true" ]]; then
echo "+++ Logging using Fluentd to Elasticsearch"
fi
if [[ "${FLUENTD_GCP-}" == "true" ]]; then
echo "+++ Logging using Fluentd to Google Cloud Logging"
fi
if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]]; then
echo "+++ Logging using Fluentd to ${LOGGING_DESTINATION:-unknown}"
# For logging to GCP we need to enable some minion scopes.
if [[ "${FLUENTD_GCP-}" == "true" ]]; then
if [[ "${LOGGING_DESTINATION-}" == "gcp" ]]; then
MINION_SCOPES="${MINION_SCOPES}, https://www.googleapis.com/auth/logging.write"
fi
fi
gcloud compute instances create "${MASTER_NAME}" \
--project "${PROJECT}" \

View File

@@ -40,3 +40,7 @@ PORTAL_NET="10.0.0.0/16"
# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch

View File

@@ -185,6 +185,8 @@ rax-boot-minions() {
-e "s|INDEX|$((i + 1))|g" \
-e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\&}|" \
-e "s|ENABLE_NODE_MONITORING|${ENABLE_NODE_MONITORING:-false}|" \
-e "s|ENABLE_NODE_LOGGING|${ENABLE_NODE_LOGGING:-false}|" \
-e "s|LOGGING_DESTINATION|${LOGGING_DESTINATION:-}|" \
$(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$(($i + 1)).yaml

View File

@@ -11,11 +11,13 @@ base:
{% if pillar['enable_node_monitoring'] is defined and pillar['enable_node_monitoring'] %}
- cadvisor
{% endif %}
{% if pillar['use-fluentd-es'] is defined and pillar['use-fluentd-es'] %}
{% if pillar['enable_node_logging'] is defined and pillar['enable_node_logging'] %}
{% if pillar['logging_destination'] is defined and pillar['logging_destination'] == 'elasticsearch' %}
- fluentd-es
{% endif %}
{% if pillar['use-fluentd-gcp'] is defined and pillar['use-fluentd-gcp'] %}
{% if pillar['logging_destination'] is defined and pillar['logging_destination'] == 'gcp' %}
- fluentd-gcp
{% endif %}
{% endif %}
- logrotate
{% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}

View File

@@ -40,3 +40,7 @@ done
# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch

View File

@@ -83,6 +83,8 @@ cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
portal_net: $PORTAL_NET
cert_ip: $MASTER_IP
enable_node_monitoring: $ENABLE_NODE_MONITORING
enable_node_logging: $ENABLE_NODE_LOGGING
logging_destination: $LOGGING_DESTINATION
EOF
# Configure the salt-master

View File

@@ -35,3 +35,7 @@ PORTAL_NET="10.244.240.0/20"
# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch

View File

@@ -23,6 +23,8 @@ cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
node_instance_prefix: $NODE_INSTANCE_PREFIX
portal_net: $PORTAL_NET
enable_node_monitoring: $ENABLE_NODE_MONITORING
enable_node_logging: $ENABLE_NODE_LOGGING
logging_destination: $LOGGING_DESTINATION
EOF
mkdir -p /srv/salt-overlay/salt/nginx

View File

@@ -292,6 +292,8 @@ function kube-up {
echo "readonly NODE_INSTANCE_PREFIX='${INSTANCE_PREFIX}-minion'"
echo "readonly PORTAL_NET='${PORTAL_NET}'"
echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
echo "readonly SERVER_BINARY_TAR='${SERVER_BINARY_TAR##*/}'"
echo "readonly SALT_TAR='${SALT_TAR##*/}'"
echo "readonly MASTER_HTPASSWD='${htpasswd}'"

View File

@@ -1,6 +1,6 @@
# Makefile for a synthetic logger to be logged
# by GCP. The cluster must have been created with
# the environment variable FLUENTD_GCP set to 'true'.
# the variable LOGGING_DESTINATION set to 'gcp'.
.PHONY: up down logger-up logger-down get

View File

@@ -5,11 +5,15 @@
### Logging with Fluentd and Elasticsearch
To enable logging of the stdout and stderr output of every Docker container in
a Kubernetes cluster set the shell environment
variable ``FLUENTD_ELASTICSEARCH`` to ``true`` e.g. in bash:
a Kubernetes cluster, set the shell environment variables
``ENABLE_NODE_LOGGING`` to ``true`` and ``LOGGING_DESTINATION`` to ``elasticsearch``,
e.g. in bash:
```
export FLUENTD_ELASTICSEARCH=true
export ENABLE_NODE_LOGGING=true
export LOGGING_DESTINATION=elasticsearch
```
This will instantiate a [Fluentd](http://www.fluentd.org/) instance on each node, which will
collect all the Docker container log files. The collected logs will
be targeted at an [Elasticsearch](http://www.elasticsearch.org/) instance assumed to be running on the
@@ -20,4 +24,5 @@ Elasticsearch service (more information to follow shortly in the contrib directo
### Logging with Fluentd and Google Cloud Platform
To enable logging of Docker containers in a cluster using Google Cloud
Platform set the shell environment variable ``FLUENTD_GCP`` to ``true``.
Platform, set the shell environment variables ``ENABLE_NODE_LOGGING`` to ``true`` and
``LOGGING_DESTINATION`` to ``gcp``.
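For example, the analogous bash settings would be:
```
# Collect node logs with Fluentd and ship them to Google Cloud Logging.
export ENABLE_NODE_LOGGING=true
export LOGGING_DESTINATION=gcp
```
Note that when ``LOGGING_DESTINATION`` is ``gcp``, the GCE ``kube-up`` function also appends the
``https://www.googleapis.com/auth/logging.write`` scope to ``MINION_SCOPES`` (see the change above),
so the minions are permitted to write to Google Cloud Logging.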