fixed conflict with the current upstream master
@@ -1,20 +1,42 @@
|
||||
# Kubernetes Examples: HEAD
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<h1>*** PLEASE NOTE: These examples apply to the HEAD of the source
|
||||
tree only. If you are using a released version of Kubernetes, you almost
|
||||
certainly want the examples that go with that version.</h1>
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<strong>Examples for specific releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).</strong>
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
##
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Kubernetes Examples: releases.k8s.io/HEAD
|
||||
|
||||
This directory contains a number of different examples of how to run
|
||||
applications with Kubernetes.
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,20 +1,60 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/aws_ebs/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
This is a simple web server pod which serves HTML from an AWS EBS
|
||||
volume.
|
||||
|
||||
Create a volume in the same region as your node, add your volume
information to the pod description file aws-ebs-web.yaml, and then create
the pod:
|
||||
```shell
|
||||
$ kubectl create -f aws-ebs-web.yaml
|
||||
|
||||
```sh
|
||||
$ kubectl create -f examples/aws_ebs/aws-ebs-web.yaml
|
||||
```
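For orientation, a pod of this kind pairs a web-server container with an `awsElasticBlockStore` volume. The following is only a rough sketch of what `aws-ebs-web.yaml` looks like, not its exact contents; the names, image, and volume ID are placeholders you would replace with your own values:

```yaml
# Hypothetical sketch of a pod serving HTML from an AWS EBS volume.
apiVersion: v1
kind: Pod
metadata:
  name: aws-web
spec:
  containers:
    - name: web
      image: nginx
      ports:
        - containerPort: 80
      volumeMounts:
        # Mount the EBS volume where the web server reads its HTML.
        - name: html-volume
          mountPath: /usr/share/nginx/html
  volumes:
    - name: html-volume
      awsElasticBlockStore:
        volumeID: aws://<availability-zone>/<volume-id>  # your EBS volume
        fsType: ext4
```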
|
||||
|
||||
Add some data to the volume if it is empty:
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ echo "Hello World" >& /var/lib/kubelet/plugins/kubernetes.io/aws-ebs/mounts/aws/{Region}/{Volume ID}/index.html
|
||||
```
|
||||
|
||||
You should now be able to query your web server:
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ curl <Pod IP address>
|
||||
Hello World
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,4 +1,39 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/blog-logging/diagrams/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Diagrams for Cloud Logging Blog Article
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/cassandra/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Cloud Native Deployments of Cassandra using Kubernetes
|
||||
|
||||
The following document describes the development of a _cloud native_ [Cassandra](http://cassandra.apache.org/) deployment on Kubernetes. When we say _cloud native_ we mean an application which understands that it is running within a cluster manager, and uses this cluster management infrastructure to help implement the application. In particular, in this instance, a custom Cassandra ```SeedProvider``` is used to enable Cassandra to dynamically discover new Cassandra nodes as they join the cluster.
|
||||
@@ -5,15 +38,18 @@ The following document describes the development of a _cloud native_ [Cassandra]
|
||||
This document also attempts to describe the core components of Kubernetes: _Pods_, _Services_, and _Replication Controllers_.
|
||||
|
||||
### Prerequisites
|
||||
This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/getting-started-guides) for installation instructions for your platform.
|
||||
|
||||
This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting started](../../docs/getting-started-guides/) for installation instructions for your platform.
|
||||
|
||||
This example also requires a few code and configuration files. To avoid typing these out, you can ```git clone``` the Kubernetes repository to your local computer.
|
||||
|
||||
### A note for the impatient
|
||||
|
||||
This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end.
|
||||
|
||||
### Simple Single Pod Cassandra Node
|
||||
In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.
|
||||
|
||||
In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.
|
||||
In this simple case, we define a single container running Cassandra for our pod:
|
||||
|
||||
```yaml
|
||||
@@ -56,14 +92,16 @@ spec:
|
||||
|
||||
There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However, it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built-in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).
|
||||
|
||||
You may also note that we are setting some Cassandra parameters (```MAX_HEAP_SIZE``` and ```HEAP_NEWSIZE```) and adding information about the [namespace](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/namespaces.md). We also tell Kubernetes that the container exposes both the ```CQL``` and ```Thrift``` API ports. Finally, we tell the cluster manager that we need 0.5 cpu (0.5 core).
|
||||
You may also note that we are setting some Cassandra parameters (```MAX_HEAP_SIZE``` and ```HEAP_NEWSIZE```) and adding information about the [namespace](../../docs/user-guide/namespaces.md). We also tell Kubernetes that the container exposes both the ```CQL``` and ```Thrift``` API ports. Finally, we tell the cluster manager that we need 0.5 cpu (0.5 core).
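As a rough sketch of the container fields that paragraph describes (the authoritative spec is `examples/cassandra/cassandra.yaml`; the heap values are illustrative assumptions and the port numbers are simply Cassandra's defaults):

```yaml
# Sketch only; see examples/cassandra/cassandra.yaml for the real container spec.
env:
  - name: MAX_HEAP_SIZE
    value: 512M              # example value, an assumption
  - name: HEAP_NEWSIZE
    value: 100M              # example value, an assumption
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace   # namespace information for the SeedProvider
ports:
  - name: cql
    containerPort: 9042      # Cassandra's default CQL port
  - name: thrift
    containerPort: 9160      # Cassandra's default Thrift port
resources:
  limits:
    cpu: 0.5                 # the 0.5 core mentioned above
```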
|
||||
|
||||
In theory, we could create a single Cassandra pod right now, but since `KubernetesSeedProvider` needs to learn what nodes are in the Cassandra deployment, we need to create a service first.
|
||||
|
||||
### Cassandra Service
|
||||
In Kubernetes a _[Service](../../docs/services.md)_ describes a set of Pods that perform the same task. For example, the set of Pods in a Cassandra cluster can be a Kubernetes Service, or even just the single Pod we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set of Pods. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. This is the way that we initially use Services with Cassandra.
|
||||
|
||||
In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of Pods in a Cassandra cluster can be a Kubernetes Service, or even just the single Pod we created above. An important use for a Service is to create a load balancer which distributes traffic across members of the set of Pods. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods (or the single Pod we've already created) available via the Kubernetes API. This is the way that we initially use Services with Cassandra.
|
||||
|
||||
Here is the service description:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
@@ -81,14 +119,15 @@ spec:
|
||||
The important thing to note here is the ```selector```. It is a query over labels that identifies the set of _Pods_ contained by the _Service_. In this case, the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
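Put side by side, the two fragments that must agree look roughly like this (a sketch using the `name=cassandra` label from this example):

```yaml
# Label on the pod (from the pod specification above):
metadata:
  labels:
    name: cassandra
---
# Selector on the service (from cassandra-service.yaml):
spec:
  selector:
    name: cassandra
```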
|
||||
|
||||
Create this service as follows:
|
||||
|
||||
```sh
|
||||
$ kubectl create -f cassandra-service.yaml
|
||||
$ kubectl create -f examples/cassandra/cassandra-service.yaml
|
||||
```
|
||||
|
||||
Now, as the service is running, we can create the first Cassandra pod using the mentioned specification.
|
||||
|
||||
```sh
|
||||
$ kubectl create -f cassandra.yaml
|
||||
$ kubectl create -f examples/cassandra/cassandra.yaml
|
||||
```
|
||||
|
||||
After a few moments, you should be able to see the pod running, plus its single container:
|
||||
@@ -129,9 +168,10 @@ subsets:
|
||||
```
|
||||
|
||||
### Adding replicated nodes
|
||||
|
||||
Of course, a single node cluster isn't particularly interesting. The real power of Kubernetes and Cassandra lies in easily building a replicated, scalable Cassandra cluster.
|
||||
|
||||
In Kubernetes a _[Replication Controller](../../docs/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_, it has a selector query which identifies the members of its set. Unlike a _Service_, it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.
|
||||
In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_, it has a selector query which identifies the members of its set. Unlike a _Service_, it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.
|
||||
|
||||
Replication controllers will "adopt" existing pods that match their selector query, so let's create a replication controller with a single replica to adopt our existing Cassandra pod.
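The shape of such a controller is roughly the following; this is a sketch rather than the full `examples/cassandra/cassandra-controller.yaml`, with the container details mirroring the pod spec discussed earlier:

```yaml
# Sketch of a replication controller that adopts pods labeled name=cassandra.
apiVersion: v1
kind: ReplicationController
metadata:
  name: cassandra
spec:
  replicas: 1                 # a single replica, so it adopts the existing pod
  selector:
    name: cassandra           # the same label query the service uses
  template:
    metadata:
      labels:
        name: cassandra
    spec:
      containers:
        - name: cassandra
          image: kubernetes/cassandra   # the image described above
```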
|
||||
|
||||
@@ -186,12 +226,13 @@ Most of this replication controller definition is identical to the Cassandra pod
|
||||
Create this controller:
|
||||
|
||||
```sh
|
||||
$ kubectl create -f cassandra-controller.yaml
|
||||
$ kubectl create -f examples/cassandra/cassandra-controller.yaml
|
||||
```
|
||||
|
||||
This is not that interesting yet, since we haven't actually done anything new. Now it will get interesting.
|
||||
|
||||
Let's scale our cluster to 2:
|
||||
|
||||
```sh
|
||||
$ kubectl scale rc cassandra --replicas=2
|
||||
```
|
||||
@@ -221,11 +262,13 @@ UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e
|
||||
```
|
||||
|
||||
Now let's scale our cluster to 4 nodes:
|
||||
|
||||
```sh
|
||||
$ kubectl scale rc cassandra --replicas=4
|
||||
```
|
||||
|
||||
In a few moments, you can examine the status again:
|
||||
|
||||
```sh
|
||||
$ kubectl exec -ti cassandra -- nodetool status
|
||||
Datacenter: datacenter1
|
||||
@@ -240,18 +283,18 @@ UN 10.244.3.3 51.28 KB 256 51.0% dafe3154-1d67-42e1-ac1d-78e
|
||||
```
|
||||
|
||||
### tl; dr;
|
||||
|
||||
For those of you who are impatient, here is the summary of the commands we ran in this tutorial.
|
||||
|
||||
```sh
|
||||
|
||||
# create a service to track all cassandra nodes
|
||||
kubectl create -f cassandra-service.yaml
|
||||
kubectl create -f examples/cassandra/cassandra-service.yaml
|
||||
|
||||
# create a single cassandra node
|
||||
kubectl create -f cassandra.yaml
|
||||
kubectl create -f examples/cassandra/cassandra.yaml
|
||||
|
||||
# create a replication controller to replicate cassandra nodes
|
||||
kubectl create -f cassandra-controller.yaml
|
||||
kubectl create -f examples/cassandra/cassandra-controller.yaml
|
||||
|
||||
# scale up to 2 nodes
|
||||
kubectl scale rc cassandra --replicas=2
|
||||
@@ -265,6 +308,9 @@ kubectl scale rc cassandra --replicas=4
|
||||
|
||||
### Seed Provider Source
|
||||
|
||||
See [here](./java/src/io/k8s/cassandra/KubernetesSeedProvider.java).
|
||||
See [here](java/src/io/k8s/cassandra/KubernetesSeedProvider.java).
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -12,7 +12,7 @@ RUN gpg --keyserver pgp.mit.edu --recv-keys 0353B12C
|
||||
RUN gpg --export --armor 0353B12C | apt-key add -
|
||||
|
||||
RUN apt-get update
|
||||
RUN apt-get -qq -y install cassandra
|
||||
RUN apt-get -qq -y install procps cassandra
|
||||
|
||||
COPY cassandra.yaml /etc/cassandra/cassandra.yaml
|
||||
COPY run.sh /run.sh
|
||||
|
@@ -100,7 +100,7 @@ public class KubernetesSeedProvider implements SeedProvider {
|
||||
|
||||
public List<InetAddress> getSeeds() {
|
||||
List<InetAddress> list = new ArrayList<InetAddress>();
|
||||
String host = "https://kubernetes.default.cluster.local";
|
||||
String host = "https://kubernetes.default.svc.cluster.local";
|
||||
String serviceName = getEnvOrDefault("CASSANDRA_SERVICE", "cassandra");
|
||||
String podNamespace = getEnvOrDefault("POD_NAMESPACE", "default");
|
||||
String path = String.format("/api/v1/namespaces/%s/endpoints/", podNamespace);
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/celery-rabbitmq/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Example: Distributed task queues with Celery, RabbitMQ and Flower
|
||||
|
||||
## Introduction
|
||||
@@ -24,7 +57,7 @@ At the end of the example, we will have:
|
||||
|
||||
## Prerequisites
|
||||
|
||||
You should already have turned up a Kubernetes cluster. To get the most out of this example, ensure that Kubernetes will create more than one minion (e.g. by setting your `NUM_MINIONS` environment variable to 2 or more).
|
||||
You should already have turned up a Kubernetes cluster. To get the most out of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_MINIONS` environment variable to 2 or more).
|
||||
|
||||
|
||||
## Step 1: Start the RabbitMQ service
|
||||
@@ -50,7 +83,7 @@ spec:
|
||||
|
||||
To start the service, run:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl create -f examples/celery-rabbitmq/rabbitmq-service.yaml
|
||||
```
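For orientation, a service of this kind looks roughly like the sketch below; the real definition lives in `examples/celery-rabbitmq/rabbitmq-service.yaml`, the selector label here is an assumption, and 5672 is the AMQP port that the broker URL later in this example points at:

```yaml
# Rough sketch of a RabbitMQ service; the selector label is assumed.
apiVersion: v1
kind: Service
metadata:
  name: rabbitmq-service   # yields RABBITMQ_SERVICE_SERVICE_HOST/_PORT env vars in pods
spec:
  ports:
    - port: 5672           # AMQP
  selector:
    component: rabbitmq    # assumed label on the RabbitMQ pods
```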
|
||||
|
||||
@@ -196,6 +229,7 @@ On GCE this can be done with:
|
||||
```
|
||||
$ gcloud compute firewall-rules create --allow=tcp:5555 --target-tags=kubernetes-minion kubernetes-minion-5555
|
||||
```
|
||||
|
||||
Please remember to delete the rule after you are done with the example (on GCE: `$ gcloud compute firewall-rules delete kubernetes-minion-5555`)
|
||||
|
||||
To bring up the pods, run this command `$ kubectl create -f examples/celery-rabbitmq/flower-controller.yaml`. This controller is defined as so:
|
||||
@@ -220,15 +254,12 @@ spec:
|
||||
containers:
|
||||
- image: endocode/flower
|
||||
name: flower
|
||||
ports:
|
||||
- containerPort: 5555
|
||||
hostPort: 5555
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
```
|
||||
|
||||
This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed. This image uses the following command to start Flower:
|
||||
This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower:
|
||||
|
||||
```sh
|
||||
flower --broker=amqp://guest:guest@${RABBITMQ_SERVICE_SERVICE_HOST:localhost}:5672//
|
||||
@@ -257,5 +288,6 @@ Point your internet browser to the appropriate flower-service address, port 5555
|
||||
If you click on the tab called "Tasks", you should see an ever-growing list of tasks called "celery_conf.add" which the run\_tasks.py script is dispatching.
|
||||
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -17,9 +17,6 @@ spec:
|
||||
containers:
|
||||
- image: endocode/flower
|
||||
name: flower
|
||||
ports:
|
||||
- containerPort: 5555
|
||||
hostPort: 5555
|
||||
resources:
|
||||
limits:
|
||||
cpu: 100m
|
||||
|
@@ -1,28 +1,61 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/cluster-dns/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Kubernetes DNS example
|
||||
|
||||
This is a toy example demonstrating how to use Kubernetes DNS.
|
||||
|
||||
### Step Zero: Prerequisites
|
||||
|
||||
This example assumes that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides). Make sure DNS is enabled in your setup, see [DNS doc](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/cluster/addons/dns).
|
||||
This example assumes that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides/). Make sure DNS is enabled in your setup, see [DNS doc](../../cluster/addons/dns/).
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ cd kubernetes
|
||||
$ hack/dev-build-and-up.sh
|
||||
```
|
||||
|
||||
### Step One: Create two namespaces
|
||||
|
||||
We'll see how cluster DNS works across multiple [namespaces](../../docs/namespaces.md). First, we need to create two namespaces:
|
||||
We'll see how cluster DNS works across multiple [namespaces](../../docs/user-guide/namespaces.md). First, we need to create two namespaces:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl create -f examples/cluster-dns/namespace-dev.yaml
|
||||
$ kubectl create -f examples/cluster-dns/namespace-prod.yaml
|
||||
```
|
||||
|
||||
Now list all namespaces:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get namespaces
|
||||
NAME LABELS STATUS
|
||||
default <none> Active
|
||||
@@ -32,7 +65,7 @@ production name=production Active
|
||||
|
||||
For the kubectl client to work with each namespace, we define two contexts:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME}
|
||||
$ kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME}
|
||||
```
|
||||
@@ -41,16 +74,16 @@ You can view your cluster name and user name in kubernetes config at ~/.kube/con
|
||||
|
||||
### Step Two: Create backend replication controller in each namespace
|
||||
|
||||
Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](../../docs/replication-controller.md) in each namespace.
|
||||
Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](../../docs/user-guide/replication-controller.md) in each namespace.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config use-context dev
|
||||
$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
|
||||
```
|
||||
|
||||
Once that's up you can list the pod in the cluster:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
dns-backend dns-backend ddysher/dns-backend name=dns-backend 1
|
||||
@@ -58,7 +91,7 @@ dns-backend dns-backend ddysher/dns-backend name=dns-backend 1
|
||||
|
||||
Now repeat the above commands to create a replication controller in prod namespace:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config use-context prod
|
||||
$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
|
||||
$ kubectl get rc
|
||||
@@ -69,16 +102,16 @@ dns-backend dns-backend ddysher/dns-backend name=dns-backend 1
|
||||
### Step Three: Create backend service
|
||||
|
||||
Use the file [`examples/cluster-dns/dns-backend-service.yaml`](dns-backend-service.yaml) to create
|
||||
a [service](../../docs/services.md) for the backend server.
|
||||
a [service](../../docs/user-guide/services.md) for the backend server.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config use-context dev
|
||||
$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
|
||||
```
|
||||
|
||||
Once that's up you can list the service in the cluster:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get service dns-backend
|
||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
||||
dns-backend <none> name=dns-backend 10.0.236.129 8000/TCP
|
||||
@@ -86,7 +119,7 @@ dns-backend <none> name=dns-backend 10.0.236.129 8000/TCP
|
||||
|
||||
Again, repeat the same process for prod namespace:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config use-context prod
|
||||
$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
|
||||
$ kubectl get service dns-backend
|
||||
@@ -96,16 +129,16 @@ dns-backend <none> name=dns-backend 10.0.35.246 8000/TCP
|
||||
|
||||
### Step Four: Create client pod in one namespace
|
||||
|
||||
Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](../../docs/pods.md) in the dev namespace. The client pod will make a connection to the backend and exit. Specifically, it tries to connect to the address `http://dns-backend.development.cluster.local:8000`.
|
||||
Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](../../docs/user-guide/pods.md) in the dev namespace. The client pod will make a connection to the backend and exit. Specifically, it tries to connect to the address `http://dns-backend.development.cluster.local:8000`.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config use-context dev
|
||||
$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
|
||||
```
|
||||
|
||||
Once that's up you can list the pod in the cluster:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get pods dns-frontend
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
dns-frontend 0/1 ExitCode:0 0 1m
|
||||
@@ -113,7 +146,7 @@ dns-frontend 0/1 ExitCode:0 0 1m
|
||||
|
||||
Wait until the pod succeeds, then we can see the output from the client pod:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl logs dns-frontend
|
||||
2015-05-07T20:13:54.147664936Z 10.0.236.129
|
||||
2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000
|
||||
@@ -121,11 +154,11 @@ $ kubectl logs dns-frontend
|
||||
2015-05-07T20:13:54.147738295Z Hello World!
|
||||
```
|
||||
|
||||
Please refer to the [source code](./images/frontend/client.py) for details about the log output. The first line prints the IP address associated with the service in the dev namespace; the remaining lines print our request and the server response.
|
||||
Please refer to the [source code](images/frontend/client.py) for details about the log output. The first line prints the IP address associated with the service in the dev namespace; the remaining lines print our request and the server response.
|
||||
|
||||
If we switch to the prod namespace with the same pod config, we'll see the same result, i.e. DNS will resolve across namespaces.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl config use-context prod
|
||||
$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
|
||||
$ kubectl logs dns-frontend
|
||||
@@ -138,10 +171,11 @@ $ kubectl logs dns-frontend
|
||||
|
||||
#### Note about default namespace
|
||||
|
||||
If you prefer not to use namespaces, then all your services can be addressed using the `default` namespace, e.g. `http://dns-backend.default.cluster.local:8000`, or the shorthand version `http://dns-backend:8000`
|
||||
If you prefer not to use namespaces, then all your services can be addressed using the `default` namespace, e.g. `http://dns-backend.default.svc.cluster.local:8000`, or the shorthand version `http://dns-backend:8000`
|
||||
|
||||
|
||||
### tl; dr;
|
||||
|
||||
For those of you who are impatient, here is the summary of the commands we ran in this tutorial. Remember to first set `$CLUSTER_NAME` and `$USER_NAME` to the values found in `~/.kube/config`.
|
||||
|
||||
```sh
|
||||
@@ -176,4 +210,6 @@ kubectl logs dns-frontend
|
||||
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,36 +0,0 @@
|
||||
# Downward API example
|
||||
|
||||
Following this example, you will create a pod with a container that consumes the pod's name and
|
||||
namespace using the [downward API](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/downward_api.md).
|
||||
|
||||
## Step Zero: Prerequisites
|
||||
|
||||
This example assumes you have a Kubernetes cluster installed and running, and that you have
|
||||
installed the ```kubectl``` command line tool somewhere in your path. Please see the [getting
|
||||
started](../../docs/getting-started-guides) for installation instructions for your platform.
|
||||
|
||||
## Step One: Create the pod
|
||||
|
||||
Containers consume the downward API using environment variables. The downward API allows
|
||||
containers to be injected with the name and namespace of the pod the container is in.
|
||||
|
||||
Use the [`examples/downward-api/dapi-pod.yaml`](dapi-pod.yaml) file to create a Pod with a container that consumes the
|
||||
downward API.
|
||||
|
||||
```shell
|
||||
$ kubectl create -f examples/downward-api/dapi-pod.yaml
|
||||
```
|
||||
|
||||
### Examine the logs
|
||||
|
||||
This pod runs the `env` command in a container that consumes the downward API. You can grep
|
||||
through the pod logs to see that the pod was injected with the correct values:
|
||||
|
||||
```shell
|
||||
$ kubectl logs dapi-test-pod | grep POD_
|
||||
2015-04-30T20:22:18.568024817Z POD_NAME=dapi-test-pod
|
||||
2015-04-30T20:22:18.568087688Z POD_NAMESPACE=default
|
||||
```
|
||||
|
||||
|
||||
[]()
|
@@ -1,19 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: dapi-test-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: test-container
|
||||
image: gcr.io/google_containers/busybox
|
||||
command: [ "/bin/sh", "-c", "env" ]
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
restartPolicy: Never
|
@@ -1,21 +1,55 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/elasticsearch/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Elasticsearch for Kubernetes
|
||||
|
||||
This directory contains the source for a Docker image that creates an instance
|
||||
of [Elasticsearch](https://www.elastic.co/products/elasticsearch) 1.5.2 which can
|
||||
be used to automatically form clusters when used
|
||||
with [replication controllers](../../docs/replication-controller.md). This will not work with the library Elasticsearch image
|
||||
with [replication controllers](../../docs/user-guide/replication-controller.md). This will not work with the library Elasticsearch image
|
||||
because multicast discovery will not find the other pod IPs needed to form a cluster. This
|
||||
image detects other Elasticsearch [pods](../../docs/pods.md) running in a specified [namespace](../../docs/namespaces.md) with a given
|
||||
image detects other Elasticsearch [pods](../../docs/user-guide/pods.md) running in a specified [namespace](../../docs/user-guide/namespaces.md) with a given
|
||||
label selector. The detected instances are used to form a list of peer hosts which
|
||||
are used as part of the unicast discovery mechanism for Elasticsearch. The detection
|
||||
of the peer nodes is done by a program which communicates with the Kubernetes API
|
||||
server to get a list of matching Elasticsearch pods. To enable authenticated
|
||||
communication this image needs a [secret](../../docs/secrets.md) to be mounted at `/etc/apiserver-secret`
|
||||
communication this image needs a [secret](../../docs/user-guide/secrets.md) to be mounted at `/etc/apiserver-secret`
|
||||
with the basic authentication username and password.
|
||||
|
||||
Here is an example replication controller specification that creates 4 instances of Elasticsearch which is in the file
|
||||
[music-rc.yaml](music-rc.yaml).
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
@@ -56,6 +90,7 @@ spec:
|
||||
secret:
|
||||
secretName: apiserver-secret
|
||||
```
|
||||
|
||||
The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to
|
||||
exist in the same namespace.
|
||||
The `SELECTOR` variable should be set to a label query that identifies the Elasticsearch
|
||||
@@ -67,7 +102,8 @@ for the replication controller (in this case `mytunes`).
|
||||
|
||||
Before creating pods with the replication controller a secret containing the bearer authentication token
|
||||
should be set up. A template is provided in the file [apiserver-secret.yaml](apiserver-secret.yaml):
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
@@ -75,11 +111,12 @@ metadata:
|
||||
namespace: NAMESPACE
|
||||
data:
|
||||
token: "TOKEN"
|
||||
|
||||
```
|
||||
|
||||
Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the base64 encoded
version of the bearer token reported by `kubectl config view`, e.g.
|
||||
```
|
||||
|
||||
```console
|
||||
$ kubectl config view
|
||||
...
|
||||
- name: kubernetes-logging_kubernetes-basic-auth
|
||||
@@ -88,10 +125,11 @@ $ kubectl config view
|
||||
...
|
||||
$ echo yGlDcMvSZPX4PyP0Q5bHgAYgi1iyEHv2 | base64
|
||||
eUdsRGNNdlNaUFg0UHlQMFE1YkhnQVlnaTFpeUVIdjIK=
|
||||
```
|
||||
|
||||
```
|
||||
resulting in the file:
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
@@ -99,23 +137,26 @@ metadata:
|
||||
namespace: mytunes
|
||||
data:
|
||||
token: "eUdsRGNNdlNaUFg0UHlQMFE1YkhnQVlnaTFpeUVIdjIK="
|
||||
|
||||
```
|
||||
|
||||
which can be used to create the secret in your namespace:
|
||||
```
|
||||
kubectl create -f apiserver-secret.yaml --namespace=mytunes
|
||||
|
||||
```console
|
||||
kubectl create -f examples/elasticsearch/apiserver-secret.yaml --namespace=mytunes
|
||||
secrets/apiserver-secret
|
||||
|
||||
```
|
||||
|
||||
Now you are ready to create the replication controller which will then create the pods:
|
||||
```
|
||||
$ kubectl create -f music-rc.yaml --namespace=mytunes
|
||||
replicationcontrollers/music-db
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/elasticsearch/music-rc.yaml --namespace=mytunes
|
||||
replicationcontrollers/music-db
|
||||
```
|
||||
It's also useful to have a [service](../../docs/services.md) with a load balancer for accessing the Elasticsearch
|
||||
|
||||
It's also useful to have a [service](../../docs/user-guide/services.md) with a load balancer for accessing the Elasticsearch
|
||||
cluster which can be found in the file [music-service.yaml](music-service.yaml).
|
||||
```
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
@@ -132,14 +173,17 @@ spec:
|
||||
targetPort: es
|
||||
type: LoadBalancer
|
||||
```
|
||||
Let's create the service with an external load balancer:
|
||||
```
|
||||
$ kubectl create -f music-service.yaml --namespace=mytunes
|
||||
services/music-server
|
||||
|
||||
Let's create the service with an external load balancer:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/elasticsearch/music-service.yaml --namespace=mytunes
|
||||
services/music-server
|
||||
```
|
||||
|
||||
Let's see what we've got:
|
||||
```
|
||||
|
||||
```console
|
||||
$ kubectl get pods,rc,services,secrets --namespace=mytunes
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
@@ -155,8 +199,10 @@ music-server name=music-db name=music-db 10.0.45.177 9200/TCP
|
||||
NAME TYPE DATA
|
||||
apiserver-secret Opaque 1
|
||||
```
|
||||
|
||||
This shows 4 instances of Elasticsearch running. After making sure that port 9200 is accessible for this cluster (e.g. using a firewall rule for Google Compute Engine) we can make queries via the service which will be fielded by the matching Elasticsearch pods.
|
||||
```
|
||||
|
||||
```console
|
||||
$ curl 104.197.12.157:9200
|
||||
{
|
||||
"status" : 200,
|
||||
@@ -186,8 +232,10 @@ $ curl 104.197.12.157:9200
|
||||
"tagline" : "You Know, for Search"
|
||||
}
|
||||
```
|
||||
|
||||
We can query the nodes to confirm that an Elasticsearch cluster has been formed.
|
||||
```
|
||||
|
||||
```console
|
||||
$ curl 104.197.12.157:9200/_nodes?pretty=true
|
||||
{
|
||||
"cluster_name" : "mytunes-db",
|
||||
@@ -229,8 +277,10 @@ $ curl 104.197.12.157:9200/_nodes?pretty=true
|
||||
"hosts" : [ "10.244.2.48", "10.244.0.24", "10.244.3.31", "10.244.1.37" ]
|
||||
...
|
||||
```
|
||||
|
||||
Let's ramp up the number of Elasticsearch nodes from 4 to 10:
|
||||
```
|
||||
|
||||
```console
|
||||
$ kubectl scale --replicas=10 replicationcontrollers music-db --namespace=mytunes
|
||||
scaled
|
||||
$ kubectl get pods --namespace=mytunes
|
||||
@@ -245,10 +295,11 @@ music-db-u1ru3 1/1 Running 0 38s
|
||||
music-db-wnss2 1/1 Running 0 1m
|
||||
music-db-x7j2w 1/1 Running 0 1m
|
||||
music-db-zjqyv 1/1 Running 0 1m
|
||||
```
|
||||
|
||||
```
|
||||
Let's check to make sure that these 10 nodes are part of the same Elasticsearch cluster:
|
||||
```
|
||||
|
||||
```console
|
||||
$ curl 104.197.12.157:9200/_nodes?pretty=true | grep name
|
||||
"cluster_name" : "mytunes-db",
|
||||
"name" : "Killraven",
|
||||
@@ -301,7 +352,9 @@ $ curl 104.197.12.157:9200/_nodes?pretty=true | grep name
|
||||
"name" : "mytunes-db"
|
||||
"vm_name" : "OpenJDK 64-Bit Server VM",
|
||||
"name" : "eth0",
|
||||
|
||||
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,92 +0,0 @@
|
||||
Environment Guide Example
|
||||
=========================
|
||||
This example demonstrates running pods, replication controllers, and
|
||||
services. It shows two types of pods: frontend and backend, with
|
||||
services on top of both. Accessing the frontend pod will return
|
||||
environment information about itself, and a backend pod that it has
|
||||
accessed through the service. The goal is to illuminate the
|
||||
environment metadata available to running containers inside the
|
||||
Kubernetes cluster. The documentation for the Kubernetes environment
|
||||
is [here](/docs/container-environment.md).
|
||||
|
||||

|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
This example assumes that you have a Kubernetes cluster installed and
|
||||
running, and that you have installed the `kubectl` command line tool
|
||||
somewhere in your path. Please see the [getting
|
||||
started](/docs/getting-started-guides) for installation instructions
|
||||
for your platform.
|
||||
|
||||
Optional: Build your own containers
|
||||
-----------------------------------
|
||||
The code for the containers is under
|
||||
[containers/](containers)
|
||||
|
||||
Get everything running
|
||||
----------------------
|
||||
|
||||
kubectl create -f ./backend-rc.yaml
|
||||
kubectl create -f ./backend-srv.yaml
|
||||
kubectl create -f ./show-rc.yaml
|
||||
kubectl create -f ./show-srv.yaml
|
||||
|
||||
Query the service
|
||||
-----------------
|
||||
Use `kubectl describe service show-srv` to determine the public IP of
|
||||
your service.
|
||||
|
||||
> Note: If your platform does not support external load balancers,
|
||||
you'll need to open the proper port and direct traffic to the
|
||||
internal IP shown for the frontend service with the above command
|
||||
|
||||
Run `curl <public ip>:80` to query the service. You should get
|
||||
something like this back:
|
||||
|
||||
```
|
||||
Pod Name: show-rc-xxu6i
|
||||
Pod Namespace: default
|
||||
USER_VAR: important information
|
||||
|
||||
Kubernetes environment variables
|
||||
BACKEND_SRV_SERVICE_HOST = 10.147.252.185
|
||||
BACKEND_SRV_SERVICE_PORT = 5000
|
||||
KUBERNETES_RO_SERVICE_HOST = 10.147.240.1
|
||||
KUBERNETES_RO_SERVICE_PORT = 80
|
||||
KUBERNETES_SERVICE_HOST = 10.147.240.2
|
||||
KUBERNETES_SERVICE_PORT = 443
|
||||
KUBE_DNS_SERVICE_HOST = 10.147.240.10
|
||||
KUBE_DNS_SERVICE_PORT = 53
|
||||
|
||||
Found backend ip: 10.147.252.185 port: 5000
|
||||
Response from backend
|
||||
Backend Container
|
||||
Backend Pod Name: backend-rc-6qiya
|
||||
Backend Namespace: default
|
||||
```
|
||||
|
||||
First the frontend pod's information is printed. The pod name and
|
||||
[namespace](/docs/design/namespaces.md) are retrieved from the
|
||||
[Downward API](/docs/downward_api.md). Next, `USER_VAR` is the name of
|
||||
an environment variable set in the [pod
|
||||
definition](show-rc.yaml). Then, the dynamic kubernetes environment
|
||||
variables are scanned and printed. These are used to find the backend
|
||||
service, named `backend-srv`. Finally, the frontend pod queries the
|
||||
backend service and prints the information returned. Again the backend
|
||||
pod returns its own pod name and namespace.
|
||||
|
||||
Try running the `curl` command a few times, and notice what
|
||||
changes, e.g. `watch -n 1 curl -s <ip>`. First, the frontend service
|
||||
is directing your request to different frontend pods each time. The
|
||||
frontend pods are always contacting the backend through the backend
|
||||
service. This results in a different backend pod servicing each
|
||||
request as well.
|
||||
|
||||
Cleanup
|
||||
-------
|
||||
kubectl delete rc,service -l type=show-type
|
||||
kubectl delete rc,service -l type=backend-type
|
||||
|
||||
|
||||
[]()
|
@@ -1,30 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: backend-rc
|
||||
labels:
|
||||
type: backend-type
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
type: backend-type
|
||||
spec:
|
||||
containers:
|
||||
- name: backend-container
|
||||
image: gcr.io/google-samples/env-backend:1.1
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
@@ -1,13 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: backend-srv
|
||||
labels:
|
||||
type: backend-type
|
||||
spec:
|
||||
ports:
|
||||
- port: 5000
|
||||
protocol: TCP
|
||||
selector:
|
||||
type: backend-type
|
@@ -1,23 +0,0 @@
|
||||
Building
|
||||
--------
|
||||
For each container, the build steps are the same. The examples below
|
||||
are for the `show` container. Replace `show` with `backend` for the
|
||||
backend container.
|
||||
|
||||
GCR
|
||||
---
|
||||
docker build -t gcr.io/<project-name>/show .
|
||||
gcloud docker push gcr.io/<project-name>/show
|
||||
|
||||
Docker Hub
|
||||
----------
|
||||
docker build -t <username>/show .
|
||||
docker push <username>/show
|
||||
|
||||
Change Pod Definitions
|
||||
----------------------
|
||||
Edit both `show-rc.yaml` and `backend-rc.yaml` and replace the
|
||||
specified `image:` with the one that you built.
|
||||
|
||||
|
||||
[]()
|
@@ -1,2 +0,0 @@
|
||||
FROM golang:onbuild
|
||||
EXPOSE 8080
|
@@ -1,37 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
)
|
||||
|
||||
func printInfo(resp http.ResponseWriter, req *http.Request) {
|
||||
name := os.Getenv("POD_NAME")
|
||||
namespace := os.Getenv("POD_NAMESPACE")
|
||||
fmt.Fprintf(resp, "Backend Container\n")
|
||||
fmt.Fprintf(resp, "Backend Pod Name: %v\n", name)
|
||||
fmt.Fprintf(resp, "Backend Namespace: %v\n", namespace)
|
||||
}
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", printInfo)
|
||||
log.Fatal(http.ListenAndServe(":5000", nil))
|
||||
}
|
@@ -1,2 +0,0 @@
|
||||
FROM golang:onbuild
|
||||
EXPOSE 8080
|
@@ -1,95 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func getKubeEnv() (map[string]string, error) {
|
||||
environS := os.Environ()
|
||||
environ := make(map[string]string)
|
||||
for _, val := range environS {
|
||||
split := strings.Split(val, "=")
|
||||
if len(split) != 2 {
|
||||
return environ, fmt.Errorf("Some weird env vars")
|
||||
}
|
||||
environ[split[0]] = split[1]
|
||||
}
|
||||
for key := range environ {
|
||||
if !(strings.HasSuffix(key, "_SERVICE_HOST") ||
|
||||
strings.HasSuffix(key, "_SERVICE_PORT")) {
|
||||
delete(environ, key)
|
||||
}
|
||||
}
|
||||
return environ, nil
|
||||
}
|
||||
|
||||
func printInfo(resp http.ResponseWriter, req *http.Request) {
|
||||
kubeVars, err := getKubeEnv()
|
||||
if err != nil {
|
||||
http.Error(resp, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
backendHost := os.Getenv("BACKEND_SRV_SERVICE_HOST")
|
||||
backendPort := os.Getenv("BACKEND_SRV_SERVICE_PORT")
|
||||
backendRsp, backendErr := http.Get(fmt.Sprintf(
|
||||
"http://%v:%v/",
|
||||
backendHost,
|
||||
backendPort))
|
||||
if backendErr == nil {
|
||||
defer backendRsp.Body.Close()
|
||||
}
|
||||
|
||||
name := os.Getenv("POD_NAME")
|
||||
namespace := os.Getenv("POD_NAMESPACE")
|
||||
fmt.Fprintf(resp, "Pod Name: %v \n", name)
|
||||
fmt.Fprintf(resp, "Pod Namespace: %v \n", namespace)
|
||||
|
||||
envvar := os.Getenv("USER_VAR")
|
||||
fmt.Fprintf(resp, "USER_VAR: %v \n", envvar)
|
||||
|
||||
fmt.Fprintf(resp, "\nKubenertes environment variables\n")
|
||||
var keys []string
|
||||
for key := range kubeVars {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, key := range keys {
|
||||
fmt.Fprintf(resp, "%v = %v \n", key, kubeVars[key])
|
||||
}
|
||||
|
||||
fmt.Fprintf(resp, "\nFound backend ip: %v port: %v\n", backendHost, backendPort)
|
||||
if backendErr == nil {
|
||||
fmt.Fprintf(resp, "Response from backend\n")
|
||||
io.Copy(resp, backendRsp.Body)
|
||||
} else {
|
||||
fmt.Fprintf(resp, "Error from backend: %v", backendErr.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
http.HandleFunc("/", printInfo)
|
||||
log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
}
|
Binary file not shown (an 18 KiB image was removed).
@@ -1,32 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: show-rc
|
||||
labels:
|
||||
type: show-type
|
||||
spec:
|
||||
replicas: 3
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
type: show-type
|
||||
spec:
|
||||
containers:
|
||||
- name: show-container
|
||||
image: gcr.io/google-samples/env-show:1.1
|
||||
imagePullPolicy: Always
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
protocol: TCP
|
||||
env:
|
||||
- name: USER_VAR
|
||||
value: important information
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
@@ -1,15 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: show-srv
|
||||
labels:
|
||||
type: show-type
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
targetPort: 8080
|
||||
selector:
|
||||
type: show-type
|
@@ -141,8 +141,7 @@ func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error
|
||||
func TestExampleObjectSchemas(t *testing.T) {
|
||||
cases := map[string]map[string]runtime.Object{
|
||||
"../cmd/integration": {
|
||||
"v1beta3-controller": &api.ReplicationController{},
|
||||
"v1-controller": &api.ReplicationController{},
|
||||
"v1-controller": &api.ReplicationController{},
|
||||
},
|
||||
"../examples/guestbook": {
|
||||
"frontend-controller": &api.ReplicationController{},
|
||||
@@ -160,30 +159,31 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"redis-master-service": &api.Service{},
|
||||
"redis-slave-service": &api.Service{},
|
||||
},
|
||||
"../examples/walkthrough": {
|
||||
"pod1": &api.Pod{},
|
||||
"pod2": &api.Pod{},
|
||||
"../docs/user-guide/walkthrough": {
|
||||
"pod-nginx": &api.Pod{},
|
||||
"pod-nginx-with-label": &api.Pod{},
|
||||
"pod-redis": &api.Pod{},
|
||||
"pod-with-http-healthcheck": &api.Pod{},
|
||||
"service": &api.Service{},
|
||||
"replication-controller": &api.ReplicationController{},
|
||||
"podtemplate": &api.PodTemplate{},
|
||||
},
|
||||
"../examples/update-demo": {
|
||||
"../docs/user-guide/update-demo": {
|
||||
"kitten-rc": &api.ReplicationController{},
|
||||
"nautilus-rc": &api.ReplicationController{},
|
||||
},
|
||||
"../examples/persistent-volumes/volumes": {
|
||||
"../docs/user-guide/persistent-volumes/volumes": {
|
||||
"local-01": &api.PersistentVolume{},
|
||||
"local-02": &api.PersistentVolume{},
|
||||
"gce": &api.PersistentVolume{},
|
||||
"nfs": &api.PersistentVolume{},
|
||||
},
|
||||
"../examples/persistent-volumes/claims": {
|
||||
"../docs/user-guide/persistent-volumes/claims": {
|
||||
"claim-01": &api.PersistentVolumeClaim{},
|
||||
"claim-02": &api.PersistentVolumeClaim{},
|
||||
"claim-03": &api.PersistentVolumeClaim{},
|
||||
},
|
||||
"../examples/persistent-volumes/simpletest": {
|
||||
"../docs/user-guide/persistent-volumes/simpletest": {
|
||||
"namespace": &api.Namespace{},
|
||||
"pod": &api.Pod{},
|
||||
"service": &api.Service{},
|
||||
@@ -195,14 +195,16 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"glusterfs-pod": &api.Pod{},
|
||||
"glusterfs-endpoints": &api.Endpoints{},
|
||||
},
|
||||
"../examples/liveness": {
|
||||
"../docs/user-guide/liveness": {
|
||||
"exec-liveness": &api.Pod{},
|
||||
"http-liveness": &api.Pod{},
|
||||
},
|
||||
"../docs/user-guide": {
|
||||
"multi-pod": nil,
|
||||
"pod": &api.Pod{},
|
||||
"replication": &api.ReplicationController{},
|
||||
},
|
||||
"../examples": {
|
||||
"multi-pod": nil,
|
||||
"pod": &api.Pod{},
|
||||
"replication": &api.ReplicationController{},
|
||||
"scheduler-policy-config": &schedulerapi.Policy{},
|
||||
},
|
||||
"../examples/rbd/secret": {
|
||||
@@ -231,7 +233,7 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"namespace-dev": &api.Namespace{},
|
||||
"namespace-prod": &api.Namespace{},
|
||||
},
|
||||
"../examples/downward-api": {
|
||||
"../docs/user-guide/downward-api": {
|
||||
"dapi-pod": &api.Pod{},
|
||||
},
|
||||
"../examples/elasticsearch": {
|
||||
@@ -246,16 +248,17 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"hazelcast-controller": &api.ReplicationController{},
|
||||
"hazelcast-service": &api.Service{},
|
||||
},
|
||||
"../examples/kubernetes-namespaces": {
|
||||
"../docs/admin/namespaces": {
|
||||
"namespace-dev": &api.Namespace{},
|
||||
"namespace-prod": &api.Namespace{},
|
||||
},
|
||||
"../examples/limitrange": {
|
||||
"../docs/user-guide/limitrange": {
|
||||
"invalid-pod": &api.Pod{},
|
||||
"limit-range": &api.LimitRange{},
|
||||
"limits": &api.LimitRange{},
|
||||
"namespace": &api.Namespace{},
|
||||
"valid-pod": &api.Pod{},
|
||||
},
|
||||
"../examples/logging-demo": {
|
||||
"../docs/user-guide/logging-demo": {
|
||||
"synthetic_0_25lps": &api.Pod{},
|
||||
"synthetic_10lps": &api.Pod{},
|
||||
},
|
||||
@@ -276,7 +279,7 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"nfs-server-service": &api.Service{},
|
||||
"nfs-web-pod": &api.Pod{},
|
||||
},
|
||||
"../examples/node-selection": {
|
||||
"../docs/user-guide/node-selection": {
|
||||
"pod": &api.Pod{},
|
||||
},
|
||||
"../examples/openshift-origin": {
|
||||
@@ -295,7 +298,7 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"redis-sentinel-controller": &api.ReplicationController{},
|
||||
"redis-sentinel-service": &api.Service{},
|
||||
},
|
||||
"../examples/resourcequota": {
|
||||
"../docs/user-guide/resourcequota": {
|
||||
"namespace": &api.Namespace{},
|
||||
"limits": &api.LimitRange{},
|
||||
"quota": &api.ResourceQuota{},
|
||||
@@ -306,7 +309,7 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
"driver-service": &api.Service{},
|
||||
"rc": &api.ReplicationController{},
|
||||
},
|
||||
"../examples/secrets": {
|
||||
"../docs/user-guide/secrets": {
|
||||
"secret-pod": &api.Pod{},
|
||||
"secret": &api.Secret{},
|
||||
},
|
||||
@@ -361,7 +364,7 @@ func TestExampleObjectSchemas(t *testing.T) {
|
||||
t.Errorf("Expected no error, Got %v", err)
|
||||
}
|
||||
if tested != len(expected) {
|
||||
t.Errorf("Expected %d examples, Got %d", len(expected), tested)
|
||||
t.Errorf("Directory %v: Expected %d examples, Got %d", path, len(expected), tested)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -391,9 +394,9 @@ func TestReadme(t *testing.T) {
|
||||
expectedType []runtime.Object
|
||||
}{
|
||||
{"../README.md", []runtime.Object{&api.Pod{}}},
|
||||
{"../examples/walkthrough/README.md", []runtime.Object{&api.Pod{}}},
|
||||
{"../docs/user-guide/walkthrough/README.md", []runtime.Object{&api.Pod{}}},
|
||||
{"../examples/iscsi/README.md", []runtime.Object{&api.Pod{}}},
|
||||
{"../examples/simple-yaml.md", []runtime.Object{&api.Pod{}, &api.ReplicationController{}}},
|
||||
{"../docs/user-guide/simple-yaml.md", []runtime.Object{&api.Pod{}, &api.ReplicationController{}}},
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/explorer/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
### explorer
|
||||
|
||||
Explorer is a little container for examining the runtime environment kubernetes produces for your pods.
|
||||
@@ -12,8 +45,9 @@ Currently, you can look at:
|
||||
`pod.json` is supplied as an example. You can control the port it serves on with the -port flag.
|
||||
|
||||
Example from command line (the DNS lookup looks better from a web browser):
|
||||
```
|
||||
$ kubectl create -f pod.json
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/explorer/pod.json
|
||||
$ kubectl proxy &
|
||||
Starting to serve on localhost:8001
|
||||
|
||||
@@ -105,8 +139,8 @@ Result: ([]string)<nil>
|
||||
Error: <*>lookup elasticsearch-logging: no such host
|
||||
|
||||
LookupSRV("", "", elasticsearch-logging):
|
||||
cname: elasticsearch-logging.default.cluster.local.
|
||||
Result: ([]*net.SRV)[<*>{Target:(string)elasticsearch-logging.default.cluster.local. Port:(uint16)9200 Priority:(uint16)10 Weight:(uint16)100}]
|
||||
cname: elasticsearch-logging.default.svc.cluster.local.
|
||||
Result: ([]*net.SRV)[<*>{Target:(string)elasticsearch-logging.default.svc.cluster.local. Port:(uint16)9200 Priority:(uint16)10 Weight:(uint16)100}]
|
||||
Error: <nil>
|
||||
|
||||
LookupHost(elasticsearch-logging):
|
||||
@@ -127,4 +161,6 @@ Error: <*>lookup elasticsearch-logging: no such host
|
||||
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/glusterfs/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Glusterfs
|
||||
|
||||
[Glusterfs](http://www.gluster.org) is an open source scale-out filesystem. These examples provide information about how to allow containers to use Glusterfs volumes.
|
||||
@@ -9,6 +42,7 @@ The example assumes that you have already set up a Glusterfs server cluster and
|
||||
Set up a Glusterfs server cluster and install the Glusterfs client package on the Kubernetes nodes. ([Guide](https://www.howtoforge.com/high-availability-storage-with-glusterfs-3.2.x-on-debian-wheezy-automatic-file-replication-mirror-across-two-storage-servers))
|
||||
|
||||
### Create endpoints
|
||||
|
||||
Here is a snippet of [glusterfs-endpoints.json](glusterfs-endpoints.json),
|
||||
|
||||
```
|
||||
@@ -24,15 +58,18 @@ Here is a snippet of [glusterfs-endpoints.json](glusterfs-endpoints.json),
|
||||
]
|
||||
|
||||
```
|
||||
|
||||
The "IP" field should be filled with the address of a node in the Glusterfs server cluster. In this example, it is fine to give any valid value (from 1 to 65535) to the "port" field.
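For orientation, a complete endpoints object of this shape might look like the sketch below. This is not the contents of the example file: it is rendered as YAML rather than JSON, the two addresses are the server IPs that show up in the `kubectl get endpoints` output further down, and the port value is an arbitrary placeholder.

```yaml
apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-cluster
subsets:
  - addresses:
      - ip: 10.240.106.152   # address of a Glusterfs server node
    ports:
      - port: 1              # any value from 1 to 65535 is accepted here
  - addresses:
      - ip: 10.240.79.157    # address of a second Glusterfs server node
    ports:
      - port: 1
```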
|
||||
|
||||
Create the endpoints,
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ kubectl create -f examples/glusterfs/glusterfs-endpoints.json
|
||||
```
|
||||
|
||||
You can verify that the endpoints are successfully created by running
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ kubectl get endpoints
|
||||
NAME ENDPOINTS
|
||||
glusterfs-cluster 10.240.106.152:1,10.240.79.157:1
|
||||
@@ -42,7 +79,7 @@ glusterfs-cluster 10.240.106.152:1,10.240.79.157:1
|
||||
|
||||
The following *volume* spec in [glusterfs-pod.json](glusterfs-pod.json) illustrates a sample configuration.
|
||||
|
||||
```js
|
||||
```json
|
||||
{
|
||||
"name": "glusterfsvol",
|
||||
"glusterfs": {
|
||||
@@ -60,12 +97,14 @@ The parameters are explained as the followings.
|
||||
- **readOnly** is the boolean that sets the mountpoint readOnly or readWrite.
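Putting these parameters together, the volume section of a pod spec might look like the following sketch (YAML for readability, while the example file itself is JSON; the `endpoints` and `path` values simply reuse the `glusterfs-cluster` endpoints and the `kube_vol` volume that appear elsewhere in this example, and `readOnly` is an illustrative choice):

```yaml
volumes:
  - name: glusterfsvol
    glusterfs:
      endpoints: glusterfs-cluster   # the endpoints object created above
      path: kube_vol                 # the Glusterfs volume to mount
      readOnly: true                 # illustrative; set to false for a writable mount
```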
|
||||
|
||||
Create a pod that has a container using Glusterfs volume,
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ kubectl create -f examples/glusterfs/glusterfs-pod.json
|
||||
```
|
||||
|
||||
You can verify that the pod is running:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
glusterfs 1/1 Running 0 3m
|
||||
@@ -75,7 +114,8 @@ $ kubectl get pods glusterfs -t '{{.status.hostIP}}{{"\n"}}'
|
||||
```
|
||||
|
||||
You may SSH to the host (the hostIP) and run `mount` to see whether the Glusterfs volume is mounted,
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ mount | grep kube_vol
|
||||
10.240.106.152:kube_vol on /var/lib/kubelet/pods/f164a571-fa68-11e4-ad5c-42010af019b7/volumes/kubernetes.io~glusterfs/glusterfsvol type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)
|
||||
```
|
||||
@@ -83,4 +123,6 @@ $ mount | grep kube_vol
|
||||
You may also run `docker ps` on the host to see the actual container.
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/guestbook-go/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Guestbook Example
|
||||
|
||||
This example shows how to build a simple multi-tier web application using Kubernetes and Docker. The application consists of a web front-end, a Redis master for storage, and a replicated set of Redis slaves, for all of which we will create Kubernetes replication controllers, pods, and services.
|
||||
@@ -5,108 +38,126 @@ This example shows how to build a simple multi-tier web application using Kubern
|
||||
If you are running a cluster in Google Container Engine (GKE), instead see the [Guestbook Example for Google Container Engine](https://cloud.google.com/container-engine/docs/tutorials/guestbook).
|
||||
|
||||
##### Table of Contents
|
||||
* [Step Zero: Prerequisites](<#step-zero)
|
||||
* [Step One: Create the Redis master pod](<#step-one)
|
||||
* [Step Two: Create the Redis master service](<#step-two)
|
||||
* [Step Three: Create the Redis slave pods](<#step-three)
|
||||
* [Step Four: Create the Redis slave service](<#step-four)
|
||||
* [Step Five: Create the guestbook pods](<#step-five)
|
||||
* [Step Six: Create the guestbook service](<#step-six)
|
||||
* [Step Seven: View the guestbook](<#step-seven)
|
||||
|
||||
* [Step Zero: Prerequisites](#step-zero)
|
||||
* [Step One: Create the Redis master pod](#step-one)
|
||||
* [Step Two: Create the Redis master service](#step-two)
|
||||
* [Step Three: Create the Redis slave pods](#step-three)
|
||||
* [Step Four: Create the Redis slave service](#step-four)
|
||||
* [Step Five: Create the guestbook pods](#step-five)
|
||||
* [Step Six: Create the guestbook service](#step-six)
|
||||
* [Step Seven: View the guestbook](#step-seven)
|
||||
* [Step Eight: Cleanup](#step-eight)
|
||||
|
||||
### Step Zero: Prerequisites <a id="step-zero"></a>
|
||||
|
||||
This example assumes that you have a working cluster. See the [Getting Started Guides](../../docs/getting-started-guides) for details about creating a cluster.
|
||||
This example assumes that you have a working cluster. See the [Getting Started Guides](../../docs/getting-started-guides/) for details about creating a cluster.
|
||||
|
||||
**Tip:** View all the `kubectl` commands, including their options and descriptions in the [kudectl CLI reference](../../docs/kubectl.md).
|
||||
**Tip:** View all the `kubectl` commands, including their options and descriptions in the [kubectl CLI reference](../../docs/user-guide/kubectl/kubectl.md).
|
||||
|
||||
### Step One: Create the Redis master pod <a id="step-one"></a>
|
||||
|
||||
Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](../../docs/replication-controller.md) and Redis master [pod](../../docs/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for 1 replica, so that the pod benefits from the self-healing mechanism in Kubernetes (keeps the pods alive).
|
||||
Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](../../docs/user-guide/replication-controller.md) and Redis master [pod](../../docs/user-guide/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for 1 replica, so that the pod benefits from the self-healing mechanism in Kubernetes (keeps the pods alive).
|
||||
|
||||
1. Use the [examples/guestbook-go/redis-master-controller.json](redis-master-controller.json) file to create the Redis master replication controller in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:
|
||||
```shell
|
||||
<nop>1. Use the [redis-master-controller.json](redis-master-controller.json) file to create the Redis master replication controller in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook-go/redis-master-controller.json
|
||||
replicationcontrollers/redis-master
|
||||
```
|
||||
|
||||
2. To verify that the redis-master-controller is up, list all the replication controllers in the cluster with the `kubectl get rc` command:
|
||||
```shell
|
||||
<nop>2. To verify that the redis-master-controller is up, list all the replication controllers in the cluster with the `kubectl get rc` command:
|
||||
|
||||
```console
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
redis-master redis-master gurpartap/redis app=redis,role=master 1
|
||||
...
|
||||
```
|
||||
|
||||
Result: The replication controller then creates the single Redis master pod.
|
||||
|
||||
3. To verify that the redis-master pod is running, list all the pods in cluster with the `kubectl get pods` command:
|
||||
```shell
|
||||
<nop>3. To verify that the redis-master pod is running, list all the pods in cluster with the `kubectl get pods` command:
|
||||
|
||||
```console
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
redis-master-xx4uv 1/1 Running 0 1m
|
||||
...
|
||||
```
|
||||
|
||||
Result: Once the pod has been placed (which may take up to thirty seconds), you'll see the single Redis master pod and the machine it is running on.
|
||||
|
||||
4. To verify what containers are running in the redis-master pod, you can SSH to that machine with `gcloud comput ssh --zone` *`zone_name`* *`host_name`* and then run `docker ps`:
|
||||
```shell
|
||||
<nop>4. To verify what containers are running in the redis-master pod, you can SSH to that machine with `gcloud compute ssh --zone` *`zone_name`* *`host_name`* and then run `docker ps`:
|
||||
|
||||
```console
|
||||
me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-minion-bz1p
|
||||
|
||||
me@kubernetes-minion-3:~$ sudo docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS
|
||||
d5c458dabe50 gurpartap/redis:latest "/usr/local/bin/redi 5 minutes ago Up 5 minutes
|
||||
```
|
||||
|
||||
Note: The initial `docker pull` can take a few minutes, depending on network conditions.
|
||||
|
||||
### Step Two: Create the Redis master service <a id="step-two"></a>
|
||||
A Kubernetes '[service](../../docs/services.md)' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables or DNS.
|
||||
|
||||
A Kubernetes '[service](../../docs/user-guide/services.md)' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables or DNS.
|
||||
|
||||
Services find the containers to load balance based on pod labels. The pod that you created in Step One has the labels `app=redis` and `role=master`. The `selector` field of the service determines which pods will receive the traffic sent to the service.
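As a rough illustration (sketched in YAML, while the example file itself is JSON), the essential parts of such a service are the selector and the port, reusing the labels and port shown in the `kubectl` output below:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
spec:
  selector:
    app: redis
    role: master   # routes traffic to the redis-master pod created in Step One
  ports:
    - port: 6379   # the Redis port reported by `kubectl get services`
```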
|
||||
|
||||
1. Use the [examples/guestbook-go/redis-master-service.json](redis-master-service.json) file to create the service in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:
|
||||
```shell
|
||||
<nop>1. Use the [redis-master-service.json](redis-master-service.json) file to create the service in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook-go/redis-master-service.json
|
||||
services/redis-master
|
||||
```
|
||||
|
||||
2. To verify that the redis-master service is up, list all the services in the cluster with the `kubectl get services` command:
|
||||
```shell
|
||||
<nop>2. To verify that the redis-master service is up, list all the services in the cluster with the `kubectl get services` command:
|
||||
|
||||
```console
|
||||
$ kubectl get services
|
||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
||||
redis-master app=redis,role=master app=redis,role=master 10.0.136.3 6379/TCP
|
||||
...
|
||||
```
|
||||
|
||||
Result: All new pods will see the `redis-master` service running on the host (`$REDIS_MASTER_SERVICE_HOST` environment variable) at port 6379, or running on `redis-master:6379`. After the service is created, the service proxy on each node is configured to set up a proxy on the specified port (in our example, that's port 6379).
|
||||
|
||||
|
||||
### Step Three: Create the Redis slave pods <a id="step-three"></a>
|
||||
|
||||
The Redis master we created earlier is a single pod (REPLICAS = 1), while the Redis read slaves we are creating here are 'replicated' pods. In Kubernetes, a replication controller is responsible for managing the multiple instances of a replicated pod.
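To make the replicated-pod idea concrete, here is a hypothetical YAML sketch of a controller like `redis-slave-controller.json` (the real file is JSON; the replica count, labels, and image are taken from the `kubectl` output shown in the steps below):

```yaml
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-slave
spec:
  replicas: 2            # two read slaves, as reported by `kubectl get rc` below
  selector:
    app: redis
    role: slave
  template:
    metadata:
      labels:
        app: redis
        role: slave      # labels that the redis-slave service will select on
    spec:
      containers:
        - name: redis-slave
          image: gurpartap/redis
          ports:
            - containerPort: 6379
```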
|
||||
|
||||
1. Use the file [examples/guestbook-go/redis-slave-controller.json](redis-slave-controller.json) to create the replication controller by running the `kubectl create -f` *`filename`* command:
|
||||
```shell
|
||||
<nop>1. Use the file [redis-slave-controller.json](redis-slave-controller.json) to create the replication controller by running the `kubectl create -f` *`filename`* command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook-go/redis-slave-controller.json
|
||||
replicationcontrollers/redis-slave
|
||||
```
|
||||
|
||||
2. To verify that the guestbook replication controller is running, run the `kubectl get rc` command:
|
||||
```shell
|
||||
<nop>2. To verify that the guestbook replication controller is running, run the `kubectl get rc` command:
|
||||
|
||||
```console
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
redis-master redis-master gurpartap/redis app=redis,role=master 1
|
||||
redis-slave redis-slave gurpartap/redis app=redis,role=slave 2
|
||||
...
|
||||
```
|
||||
|
||||
Result: The replication controller creates and configures the Redis slave pods through the redis-master service (name:port pair, in our example that's `redis-master:6379`).
|
||||
|
||||
Example:
|
||||
The Redis slaves get started by the replication controller with the following command:
|
||||
```shell
|
||||
|
||||
```console
|
||||
redis-server --slaveof redis-master 6379
|
||||
```
|
||||
|
||||
2. To verify that the Redis master and slaves pods are running, run the `kubectl get pods` command:
|
||||
```shell
|
||||
<nop>3. To verify that the Redis master and slaves pods are running, run the `kubectl get pods` command:
|
||||
|
||||
```console
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
redis-master-xx4uv 1/1 Running 0 18m
|
||||
@@ -114,26 +165,30 @@ The Redis master we created earlier is a single pod (REPLICAS = 1), while the Re
|
||||
redis-slave-iai40 1/1 Running 0 1m
|
||||
...
|
||||
```
|
||||
|
||||
Result: You see the single Redis master and two Redis slave pods.
|
||||
|
||||
### Step Four: Create the Redis slave service <a id="step-four"></a>
|
||||
|
||||
Just like the master, we want to have a service to proxy connections to the read slaves. In this case, in addition to discovery, the Redis slave service provides transparent load balancing to clients.
|
||||
|
||||
1. Use the [examples/guestbook-go/redis-slave-service.json](redis-slave-service.json) file to create the Redis slave service by running the `kubectl create -f` *`filename`* command:
|
||||
```shell
|
||||
<nop>1. Use the [redis-slave-service.json](redis-slave-service.json) file to create the Redis slave service by running the `kubectl create -f` *`filename`* command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook-go/redis-slave-service.json
|
||||
services/redis-slave
|
||||
```
|
||||
|
||||
2. To verify that the redis-slave service is up, list all the services in the cluster with the `kubectl get services` command:
|
||||
```shell
|
||||
<nop>2. To verify that the redis-slave service is up, list all the services in the cluster with the `kubectl get services` command:
|
||||
|
||||
```console
|
||||
$ kubectl get services
|
||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
||||
redis-master app=redis,role=master app=redis,role=master 10.0.136.3 6379/TCP
|
||||
redis-slave app=redis,role=slave app=redis,role=slave 10.0.21.92 6379/TCP
|
||||
...
|
||||
```
|
||||
|
||||
Result: The service is created with labels `app=redis` and `role=slave` to identify that the pods are running the Redis slaves.
|
||||
|
||||
Tip: It is helpful to set labels on your services themselves--as we've done here--to make it easy to locate them later.
|
||||
@@ -142,13 +197,15 @@ Tip: It is helpful to set labels on your services themselves--as we've done here
|
||||
|
||||
This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni)-based) server that is configured to talk to either the slave or master services depending on whether the request is a read or a write. The pods we are creating expose a simple JSON interface and serve a jQuery-Ajax based UI. Like the Redis read slaves, these pods are also managed by a replication controller.
|
||||
|
||||
1. Use the [examples/guestbook-go/guestbook-controller.json](guestbook-controller.json) file to create the guestbook replication controller by running the `kubectl create -f` *`filename`* command:
|
||||
```shell
|
||||
<nop>1. Use the [guestbook-controller.json](guestbook-controller.json) file to create the guestbook replication controller by running the `kubectl create -f` *`filename`* command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook-go/guestbook-controller.json
|
||||
replicationcontrollers/guestbook
|
||||
```
|
||||
|
||||
2. To verify that the guestbook replication controller is running, run the `kubectl get rc` command:
|
||||
<nop>2. To verify that the guestbook replication controller is running, run the `kubectl get rc` command:
|
||||
|
||||
```
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
@@ -158,8 +215,9 @@ This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni
|
||||
...
|
||||
```
|
||||
|
||||
3. To verify that the guestbook pods are running (it might take up to thirty seconds to create the pods), list all the pods in cluster with the `kubectl get pods` command:
|
||||
```shell
|
||||
<nop>3. To verify that the guestbook pods are running (it might take up to thirty seconds to create the pods), list all the pods in cluster with the `kubectl get pods` command:
|
||||
|
||||
```console
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
guestbook-3crgn 1/1 Running 0 2m
|
||||
@@ -170,22 +228,22 @@ This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni
|
||||
redis-slave-iai40 1/1 Running 0 6m
|
||||
...
|
||||
```
|
||||
|
||||
Result: You see a single Redis master, two Redis slaves, and three guestbook pods.
|
||||
|
||||
### Step Six: Create the guestbook service <a id="step-six"></a>
|
||||
|
||||
Just like the others, we create a service to group the guestbook pods, but this time, to make the guestbook front-end externally visible, we specify `"type": "LoadBalancer"`.
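A hypothetical YAML rendering of the relevant fields is sketched below (the actual file is JSON; the selector assumes the guestbook pods carry the `app=guestbook` label mentioned in the result note further down, and port `3000` matches the firewall note in the command output):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: guestbook
  labels:
    app: guestbook
spec:
  type: LoadBalancer   # requests an external load balancer from the cloud provider
  selector:
    app: guestbook     # assumption: the guestbook pods carry this label
  ports:
    - port: 3000       # the externally served port referenced in the firewall note
```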
|
||||
|
||||
1. Use the [examples/guestbook-go/guestbook-service.json](guestbook-service.json) file to create the guestbook service by running the `kubectl create -f` *`filename`* command:
|
||||
```shell
|
||||
<nop>1. Use the [guestbook-service.json](guestbook-service.json) file to create the guestbook service by running the `kubectl create -f` *`filename`* command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook-go/guestbook-service.json
|
||||
An external load-balanced service was created. On many platforms (e.g. Google Compute Engine),
|
||||
you will also need to explicitly open a Firewall rule for the service port(s) (tcp:3000) to serve traffic.
|
||||
See https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/services-firewall.md for more details.
|
||||
```
|
||||
|
||||
|
||||
2. To verify that the guestbook service is up, list all the services in the cluster with the `kubectl get services` command:
|
||||
<nop>2. To verify that the guestbook service is up, list all the services in the cluster with the `kubectl get services` command:
|
||||
|
||||
```
|
||||
$ kubectl get services
|
||||
NAME LABELS SELECTOR IP(S) PORT(S)
|
||||
@@ -195,6 +253,7 @@ Just like the others, we create a service to group the guestbook pods but this t
|
||||
redis-slave app=redis,role=slave app=redis,role=slave 10.0.21.92 6379/TCP
|
||||
...
|
||||
```
|
||||
|
||||
Result: The service is created with label `app=guestbook`.
|
||||
|
||||
### Step Seven: View the guestbook <a id="step-seven"></a>
|
||||
@@ -209,14 +268,6 @@ You can now play with the guestbook that you just created by opening it in a bro
|
||||
|
||||
2. Append port `3000` to the IP address (for example `http://146.148.81.8:3000`), and then navigate to that address in your browser.
|
||||
|
||||
**Remember:** You might need to open the firewall for port `3000`.
|
||||
If you're using Google Compute Engine, you can use the [Developers Console][cloud-console] or the `gcloud` CLI to open port `3000`.
|
||||
|
||||
To use the `gcloud` CLI, you can run the following command to allow traffic from any source to instances tagged `kubernetes-minion`:
|
||||
```shell
|
||||
$ gcloud compute firewall-rules create --allow=tcp:3000 --target-tags=kubernetes-minion kubernetes-minion-3000
|
||||
```
|
||||
|
||||
Result: The guestbook displays in your browser:
|
||||
|
||||

|
||||
@@ -232,7 +283,8 @@ You can now play with the guestbook that you just created by opening it in a bro
|
||||
After you're done playing with the guestbook, you can clean up by deleting the guestbook service and removing the associated resources that were created, including load balancers, forwarding rules, target pools, and Kubernetes replication controllers and services.
|
||||
|
||||
Delete all the resources by running the following `kubectl delete -f` *`filename`* command:
|
||||
```shell
|
||||
|
||||
```console
|
||||
$ kubectl delete -f examples/guestbook-go
|
||||
guestbook-controller
|
||||
guestbook
|
||||
@@ -243,7 +295,9 @@ redis-slave
|
||||
```
|
||||
|
||||
Tip: To turn down your Kubernetes cluster, follow the corresponding instructions in the version of the
|
||||
[Getting Started Guides](../../docs/getting-started-guides) that you previously used to create your cluster.
|
||||
[Getting Started Guides](../../docs/getting-started-guides/) that you previously used to create your cluster.
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/guestbook-go/_src/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Building and releasing Guestbook Image
|
||||
|
||||
This process involves building two Docker images: one compiles the source and the other hosts the compiled binaries.
|
||||
@@ -34,4 +67,6 @@ Accepts an optional tag (defaults to "latest")
|
||||
./script/clean.sh
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -4,7 +4,7 @@
|
||||
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
|
||||
<meta charset="utf-8">
|
||||
<meta content="width=device-width" name="viewport">
|
||||
<link href="/style.css" rel="stylesheet">
|
||||
<link href="style.css" rel="stylesheet">
|
||||
<title>Guestbook</title>
|
||||
</head>
|
||||
<body>
|
||||
@@ -25,10 +25,10 @@
|
||||
|
||||
<div>
|
||||
<p><h2 id="guestbook-host-address"></h2></p>
|
||||
<p><a href="/env">/env</a>
|
||||
<a href="/info">/info</a></p>
|
||||
<p><a href="env">/env</a>
|
||||
<a href="info">/info</a></p>
|
||||
</div>
|
||||
<script src="//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
|
||||
<script src="/script.js"></script>
|
||||
<script src="script.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
@@ -1,3 +1,35 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/guestbook/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
|
||||
## Guestbook Example
|
||||
@@ -33,15 +65,15 @@ The web front end interacts with the redis master via javascript redis API calls
|
||||
|
||||
### Step Zero: Prerequisites
|
||||
|
||||
This example requires a running Kubernetes cluster. See the [Getting Started guides](../../docs/getting-started-guides) for how to get started. As noted above, if you have a Google Container Engine cluster set up, go [here](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead.
|
||||
This example requires a running Kubernetes cluster. See the [Getting Started guides](../../docs/getting-started-guides/) for how to get started. As noted above, if you have a Google Container Engine cluster set up, go [here](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead.
|
||||
|
||||
### Step One: Start up the redis master
|
||||
|
||||
**Note**: The redis master in this example is *not* highly available. Making it highly available would be an interesting, but intricate exercise— redis doesn't actually support multi-master deployments at this point in time, so high availability would be a somewhat tricky thing to implement, and might involve periodic serialization to disk, and so on.
|
||||
|
||||
To start the redis master, use the file `examples/guestbook/redis-master-controller.yaml`, which describes a single [pod](../../docs/pods.md) running a redis key-value server in a container.
|
||||
To start the redis master, use the file `examples/guestbook/redis-master-controller.yaml`, which describes a single [pod](../../docs/user-guide/pods.md) running a redis key-value server in a container.
|
||||
|
||||
Although we have a single instance of our redis master, we are using a [replication controller](../../docs/replication-controller.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the replication controller will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.)
|
||||
Although we have a single instance of our redis master, we are using a [replication controller](../../docs/user-guide/replication-controller.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the replication controller will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.)
|
||||
|
||||
Here is `redis-master-controller.yaml`:
|
||||
|
||||
@@ -70,15 +102,15 @@ spec:
|
||||
|
||||
Change to the `<kubernetes>/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f redis-master-controller.yaml
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/redis-master-controller.yaml
|
||||
replicationcontrollers/redis-master
|
||||
```
|
||||
|
||||
The `replicationcontrollers/redis-master` line is the expected response to this operation.
|
||||
You can see the replication controllers for your cluster by running:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
redis-master master redis name=redis-master 1
|
||||
@@ -86,25 +118,26 @@ redis-master master redis
|
||||
|
||||
Then, you can list the pods in the cluster, to verify that the master is running:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get pods
|
||||
```
|
||||
|
||||
You'll see all pods in the cluster, including the redis master pod, and the status of each pod.
|
||||
The name of the redis master will look similar to that in the following list:
|
||||
|
||||
```shell
|
||||
```console
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
redis-master-dz33o 1/1 Running 0 2h
|
||||
```
|
||||
|
||||
(Note that an initial `docker pull` to grab a container image may take a few minutes, depending on network conditions. A pod will be reported as `Pending` while its image is being downloaded.)
|
||||
|
||||
#### Optional Interlude
|
||||
|
||||
You can get information about a pod, including the machine that it is running on, via `kubectl describe pods/<pod_name>`. E.g., for the redis master, you should see something like the following (your pod name will be different):
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl describe pods/redis-master-dz33o
|
||||
...
|
||||
Name: redis-master-dz33o
|
||||
@@ -130,7 +163,7 @@ The 'Node' is the name of the machine, e.g. `kubernetes-minion-krxw` in the exam
|
||||
|
||||
If you want to view the container logs for a given pod, you can run:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl logs <pod_name>
|
||||
```
|
||||
|
||||
@@ -138,13 +171,13 @@ These logs will usually give you enough information to troubleshoot.
|
||||
|
||||
However, if you should want to SSH to the listed host machine, you can inspect various logs there directly as well. For example, with Google Compute Engine, using `gcloud`, you can SSH like this:
|
||||
|
||||
```shell
|
||||
```console
|
||||
me@workstation$ gcloud compute ssh kubernetes-minion-krxw
|
||||
```
|
||||
|
||||
Then, you can look at the docker containers on the remote machine. You should see something like this (the specifics of the IDs will be different):
|
||||
|
||||
```shell
|
||||
```console
|
||||
me@kubernetes-minion-krxw:~$ sudo docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
...
|
||||
@@ -153,13 +186,13 @@ CONTAINER ID IMAGE COMMAND
|
||||
|
||||
If you want to see the logs for a given container, you can run:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ docker logs <container_id>
|
||||
```
|
||||
|
||||
### Step Two: Fire up the redis master service
|
||||
|
||||
A Kubernetes [service](../../docs/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP.
|
||||
A Kubernetes [service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/user-guide/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP.
|
||||
|
||||
Services find the pods to load balance based on the pods' labels.
|
||||
The pod that you created in [Step One](#step-one-start-up-the-redis-master) has the label `name=redis-master`.
|
||||
@@ -185,13 +218,14 @@ spec:
|
||||
|
||||
Create the service by running:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f redis-master-service.yaml
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/redis-master-service.yaml
|
||||
services/redis-master
|
||||
```
|
||||
|
||||
Then check the list of services, which should include the redis-master:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get services
|
||||
NAME LABELS SELECTOR IP PORT
|
||||
redis-master name=redis-master name=redis-master 10.0.246.242 6379
|
||||
@@ -210,9 +244,9 @@ The traffic flow from slaves to masters can be described in two steps, like so:
|
||||
|
||||
Kubernetes supports two primary modes of finding a service— environment variables and DNS.
|
||||
|
||||
The services in a Kubernetes cluster are discoverable inside other containers [via environment variables](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#environment-variables).
|
||||
The services in a Kubernetes cluster are discoverable inside other containers [via environment variables](../../docs/user-guide/services.md#environment-variables).
|
||||
|
||||
An alternative is to use the [cluster's DNS service](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the service name.
|
||||
An alternative is to use the [cluster's DNS service](../../docs/user-guide/services.md#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the service name.
|
||||
We'll use the DNS service for this example. E.g., you can see the service name, `redis-master`, accessed as a `host` value in the PHP script in [Step 5](#step-five-create-the-frontend-replicated-pods).
|
||||
|
||||
**Note**: **If your cluster does not have the DNS service enabled, then this example will not work out of the box.** You will need to edit `examples/guestbook/php-redis/index.php` to use environment variables for service discovery instead, then rebuild the container image from the `Dockerfile` in that directory. (However, this is unlikely to be necessary. You can check for the DNS service in the list of the clusters' services.)
|
||||
@@ -253,8 +287,8 @@ spec:
|
||||
|
||||
and create the replication controller by running:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f redis-slave-controller.yaml
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/redis-slave-controller.yaml
|
||||
replicationcontrollers/redis-slave
|
||||
|
||||
$ kubectl get rc
|
||||
@@ -265,7 +299,7 @@ redis-slave slave kubernetes/redis-
|
||||
|
||||
Once the replication controller is up, you can list the pods in the cluster, to verify that the master and slaves are running. You should see a list that includes something like the following:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
@@ -301,8 +335,8 @@ This time the selector for the service is `name=redis-slave`, because that ident
|
||||
|
||||
Now that you have created the service specification, create it in your cluster by running:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f redis-slave-service.yaml
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/redis-slave-service.yaml
|
||||
services/redis-slave
|
||||
|
||||
$ kubectl get services
|
||||
@@ -312,6 +346,7 @@ redis-slave name=redis-slave name=redis-sla
|
||||
```
|
||||
|
||||
### Step Five: Create the frontend replicated pods
|
||||
|
||||
<a href="#step-five-create-the-frontend-replicated-pods"></a>
|
||||
|
||||
A frontend pod is a simple PHP server that is configured to talk to either the slave or master services, depending on whether the client request is a read or a write. It exposes a simple AJAX interface, and serves an Angular-based UX.
|
||||
@@ -344,14 +379,14 @@ spec:
|
||||
|
||||
Using this file, you can turn up your frontend with:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f frontend-controller.yaml
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/frontend-controller.yaml
|
||||
replicationcontrollers/frontend
|
||||
```
|
||||
|
||||
Then, list all your replication controllers:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
frontend php-redis kubernetes/example-guestbook-php-redis:v2 name=frontend 3
|
||||
@@ -361,7 +396,7 @@ redis-slave slave kubernetes/redis-
|
||||
|
||||
Once it's up (again, it may take up to thirty seconds to create the pods), you can list the pods in the cluster, to verify that the master, slaves and frontends are all running. You should see a list that includes something like the following:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
...
|
||||
@@ -453,14 +488,14 @@ To do this, uncomment the `type: LoadBalancer` line in the `frontend-service.yam
|
||||
|
||||
Create the service like this:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f frontend-service.yaml
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/frontend-service.yaml
|
||||
services/frontend
|
||||
```
|
||||
|
||||
Then, list all your services again:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get services
|
||||
NAME LABELS SELECTOR IP PORT(S)
|
||||
frontend name=frontend name=frontend 10.0.93.211 80/TCP
|
||||
@@ -470,15 +505,16 @@ redis-slave name=redis-slave name=redis-sla
|
||||
|
||||
|
||||
#### Accessing the guestbook site externally
|
||||
|
||||
<a href="#accessing-the-guestbook-site-externally"></a>
|
||||
|
||||
You'll want to set up your guestbook service so that it can be accessed from outside of the internal Kubernetes network. Above, we introduced one way to do that, using the `type: LoadBalancer` spec.
|
||||
|
||||
More generally, Kubernetes supports two ways of exposing a service onto an external IP address: `NodePort`s and `LoadBalancer`s , as described [here](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#external-services).
|
||||
More generally, Kubernetes supports two ways of exposing a service onto an external IP address: `NodePort`s and `LoadBalancer`s, as described [here](../../docs/user-guide/services.md#external-services).
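For comparison, a minimal sketch of what a `NodePort` variant of the frontend service could look like is shown below. This is not one of the example files; the selector reuses the `name=frontend` label from the service listing above, and the `nodePort` value is an arbitrary choice within the default node-port range.

```yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend
spec:
  type: NodePort
  selector:
    name: frontend     # same selector as the frontend service created above
  ports:
    - port: 80
      nodePort: 30080  # arbitrary example; must fall within the cluster's node-port range
```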
|
||||
|
||||
If the `LoadBalancer` specification is used, it can take a short period for an external IP to show up in `kubectl get services` output, but you should shortly see it listed as well, e.g. like this:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ kubectl get services
|
||||
NAME LABELS SELECTOR IP PORT(S)
|
||||
frontend name=frontend name=frontend 10.0.93.211 80/TCP
|
||||
@@ -501,7 +537,7 @@ In Google Compute Engine, `kubectl` automatically creates forwarding rule for se
|
||||
|
||||
You can list the forwarding rules like this. The forwarding rule also indicates the external IP.
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ gcloud compute forwarding-rules list
|
||||
NAME REGION IP_ADDRESS IP_PROTOCOL TARGET
|
||||
frontend us-central1 130.211.188.51 TCP us-central1/targetPools/frontend
|
||||
@@ -509,7 +545,7 @@ frontend us-central1 130.211.188.51 TCP us-central1/targetP
|
||||
|
||||
In Google Compute Engine, you may also need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion` (replace with your tags as appropriate):
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-minion kubernetes-minion-80
|
||||
```
|
||||
|
||||
@@ -522,14 +558,14 @@ For Google Compute Engine details about limiting traffic to specific sources, se
|
||||
|
||||
If you are in a live Kubernetes cluster, you can just kill the pods by stopping the replication controllers and deleting the services. Using labels to select the resources to stop or delete is an easy way to do this in one command.
|
||||
|
||||
```shell
|
||||
```console
|
||||
kubectl stop rc -l "name in (redis-master, redis-slave, frontend)"
|
||||
kubectl delete service -l "name in (redis-master, redis-slave, frontend)"
|
||||
```
|
||||
|
||||
To completely tear down a Kubernetes cluster, if you ran this from source, you can use:
|
||||
|
||||
```shell
|
||||
```console
|
||||
$ <kubernetes>/cluster/kube-down.sh
|
||||
```
|
||||
|
||||
@@ -539,5 +575,7 @@ If you are having trouble bringing up your guestbook app, double check that your
|
||||
|
||||
Then, see the [troubleshooting documentation](../../docs/troubleshooting.md) for a further list of common issues and how you can diagnose them.
|
||||
|
||||
[]()
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -10,6 +10,6 @@ spec:
|
||||
# type: LoadBalancer
|
||||
ports:
|
||||
# the port that this service should serve on
|
||||
- port: 80
|
||||
- port: 80
|
||||
selector:
|
||||
name: frontend
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/hazelcast/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Cloud Native Deployments of Hazelcast using Kubernetes
|
||||
|
||||
The following document describes the development of a _cloud native_ [Hazelcast](http://hazelcast.org/) deployment on Kubernetes. When we say _cloud native_ we mean an application which understands that it is running within a cluster manager, and uses this cluster management infrastructure to help implement the application. In particular, in this instance, a custom Hazelcast ```bootstrapper``` is used to enable Hazelcast to dynamically discover Hazelcast nodes that have already joined the cluster.
|
||||
@@ -7,9 +40,11 @@ Any topology changes are communicated and handled by Hazelcast nodes themselves.
|
||||
This document also attempts to describe the core components of Kubernetes: _Pods_, _Services_, and _Replication Controllers_.
|
||||
|
||||
### Prerequisites
|
||||
This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting started](https://github.com/GoogleCloudPlatform/kubernetes/tree/master/docs/getting-started-guides) for installation instructions for your platform.
|
||||
|
||||
This example assumes that you have a Kubernetes cluster installed and running, and that you have installed the `kubectl` command line tool somewhere in your path. Please see the [getting started](../../docs/getting-started-guides/) for installation instructions for your platform.
|
||||
|
||||
### A note for the impatient
|
||||
|
||||
This is a somewhat long tutorial. If you want to jump straight to the "do it now" commands, please see the [tl; dr](#tl-dr) at the end.
|
||||
|
||||
### Sources
|
||||
@@ -17,18 +52,21 @@ This is a somewhat long tutorial. If you want to jump straight to the "do it no
|
||||
Source is freely available at:
|
||||
* Hazelcast Discovery - https://github.com/pires/hazelcast-kubernetes-bootstrapper
|
||||
* Dockerfile - https://github.com/pires/hazelcast-kubernetes
|
||||
* Docker Trusted Build - https://registry.hub.docker.com/u/pires/hazelcast-k8s
|
||||
* Docker Trusted Build - https://quay.io/repository/pires/hazelcast-kubernetes
|
||||
|
||||
### Simple Single Pod Hazelcast Node
|
||||
In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.
|
||||
|
||||
In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes.
|
||||
|
||||
In this case, we shall not run a single Hazelcast pod, because the discovery mechanism now relies on a service definition.
|
||||
|
||||
|
||||
### Adding a Hazelcast Service
|
||||
In Kubernetes a _[Service](../../docs/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods.
|
||||
|
||||
In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set of Pods that perform the same task. For example, the set of nodes in a Hazelcast cluster. An important use for a Service is to create a load balancer which distributes traffic across members of the set. But a _Service_ can also be used as a standing query which makes a dynamically changing set of Pods available via the Kubernetes API. This is actually how our discovery mechanism works, by relying on the service to discover other Hazelcast pods.
|
||||
|
||||
Here is the service description:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
@@ -39,7 +77,6 @@ metadata:
|
||||
spec:
|
||||
ports:
|
||||
- port: 5701
|
||||
targetPort: 5701
|
||||
selector:
|
||||
name: hazelcast
|
||||
```
|
||||
@@ -47,14 +84,16 @@ spec:
|
||||
The important thing to note here is the `selector`. It is a query over labels that identifies the set of _Pods_ contained by the _Service_. In this case, the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service.
|
||||
|
||||
Create this service as follows:
|
||||
|
||||
```sh
|
||||
$ kubectl create -f hazelcast-service.yaml
|
||||
$ kubectl create -f examples/hazelcast/hazelcast-service.yaml
|
||||
```
|
||||
|
||||
### Adding replicated nodes
|
||||
|
||||
The real power of Kubernetes and Hazelcast lies in easily building a replicated, resizable Hazelcast cluster.
|
||||
|
||||
In Kubernetes a _[Replication Controller](../../docs/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of it's set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with it's desired state.
|
||||
In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-controller.md)_ is responsible for replicating sets of identical pods. Like a _Service_, it has a selector query which identifies the members of its set. Unlike a _Service_, it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches its desired state.
|
||||
|
||||
Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Hazelcast Pod.
|
||||
|
||||
@@ -77,18 +116,22 @@ spec:
|
||||
containers:
|
||||
- resources:
|
||||
limits:
|
||||
cpu: 0.5
|
||||
image: quay.io/pires/hazelcast-kubernetes:0.3.1
|
||||
cpu: 0.1
|
||||
image: quay.io/pires/hazelcast-kubernetes:0.5
|
||||
name: hazelcast
|
||||
env:
|
||||
- name: "DNS_DOMAIN"
|
||||
value: "cluster.local"
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- containerPort: 5701
|
||||
name: hazelcast
|
||||
```
|
||||
|
||||
There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.3.1`. This is a `busybox` installation with JRE 8. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingle. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).
|
||||
There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However, it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps a Hazelcast instance accordingly. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built-in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later).
|
||||
|
||||
You may also note that we tell Kubernetes that the container exposes the `hazelcast` port. Finally, we set a CPU resource limit for the container.
|
||||
|
||||
@@ -99,35 +142,50 @@ Last but not least, we set `DNS_DOMAIN` environment variable according to your K
|
||||
Create this controller:
|
||||
|
||||
```sh
|
||||
$ kubectl create -f hazelcast-controller.yaml
|
||||
$ kubectl create -f examples/hazelcast/hazelcast-controller.yaml
|
||||
```
|
||||
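Once the controller exists, you can confirm that it has adopted the running pod. This is a quick check; the exact output format varies between Kubernetes versions:

```sh
# Shows the controller's selector, the desired replica count and the pods it manages.
$ kubectl describe rc hazelcast
```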
|
||||
After the controller has successfully provisioned the pod, you can query the service endpoints:
|
||||
|
||||
```sh
|
||||
$ kubectl get endpoints hazelcast -o yaml
|
||||
apiVersion: v1
|
||||
kind: Endpoints
|
||||
metadata:
|
||||
creationTimestamp: 2015-05-04T17:43:40Z
|
||||
labels:
|
||||
name: hazelcast
|
||||
name: hazelcast
|
||||
namespace: default
|
||||
resourceVersion: "120480"
|
||||
selfLink: /api/v1/namespaces/default/endpoints/hazelcast
|
||||
uid: 19a22aa9-f285-11e4-b38f-42010af0bbf9
|
||||
subsets:
|
||||
- addresses:
|
||||
- IP: 10.245.2.68
|
||||
targetRef:
|
||||
kind: Pod
|
||||
name: hazelcast
|
||||
namespace: default
|
||||
resourceVersion: "120479"
|
||||
uid: d7238173-f283-11e4-b38f-42010af0bbf9
|
||||
ports:
|
||||
- port: 5701
|
||||
protocol: TCP
|
||||
$ kubectl get endpoints hazelcast -o json
|
||||
{
|
||||
"kind": "Endpoints",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "hazelcast",
|
||||
"namespace": "default",
|
||||
"selfLink": "/api/v1/namespaces/default/endpoints/hazelcast",
|
||||
"uid": "094e507a-2700-11e5-abbc-080027eae546",
|
||||
"resourceVersion": "4094",
|
||||
"creationTimestamp": "2015-07-10T12:34:41Z",
|
||||
"labels": {
|
||||
"name": "hazelcast"
|
||||
}
|
||||
},
|
||||
"subsets": [
|
||||
{
|
||||
"addresses": [
|
||||
{
|
||||
"ip": "10.244.37.3",
|
||||
"targetRef": {
|
||||
"kind": "Pod",
|
||||
"namespace": "default",
|
||||
"name": "hazelcast-nsyzn",
|
||||
"uid": "f57eb6b0-2706-11e5-abbc-080027eae546",
|
||||
"resourceVersion": "4093"
|
||||
}
|
||||
}
|
||||
],
|
||||
"ports": [
|
||||
{
|
||||
"port": 5701,
|
||||
"protocol": "TCP"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
You can see that the _Service_ has found the pod created by the replication controller.
|
||||
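As an extra check, the address in the endpoints list should match the pod's own IP. A minimal sketch, using the pod name from the output above (yours will differ):

```sh
# The IP reported for the pod should match the address in the endpoints object.
$ kubectl describe pod hazelcast-nsyzn | grep IP
```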
@@ -135,6 +193,7 @@ You can see that the _Service_ has found the pod created by the replication cont
|
||||
Now it gets even more interesting.
|
||||
|
||||
Let's scale our cluster to 2 pods:
|
||||
|
||||
```sh
|
||||
$ kubectl scale rc hazelcast --replicas=2
|
||||
```
|
||||
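You can also confirm the new desired replica count on the controller itself (column names differ slightly across releases):

```sh
# The replica count for the hazelcast controller should now be 2.
$ kubectl get rc hazelcast
```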
@@ -144,56 +203,60 @@ Now if you list the pods in your cluster, you should see two hazelcast pods:
|
||||
```sh
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
hazelcast-1vmnv 1/1 Running 0 34s
|
||||
hazelcast-ezs01 1/1 Running 0 43s
|
||||
hazelcast-nanfb 1/1 Running 0 40s
|
||||
hazelcast-nsyzn 1/1 Running 0 2m
|
||||
kube-dns-xudrp 3/3 Running 0 1h
|
||||
```
|
||||
|
||||
To prove that this all works, you can use the `log` command to examine the logs of one pod, for example:
|
||||
|
||||
```sh
|
||||
$ kubectl logs hazelcast-ulkws hazelcast
|
||||
2015-05-09 22:06:20.016 INFO 5 --- [ main] com.github.pires.hazelcast.Application : Starting Application v0.2-SNAPSHOT on hazelcast-enyli with PID 5 (/bootstrapper.jar started by root in /)
|
||||
2015-05-09 22:06:20.071 INFO 5 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@5424f110: startup date [Sat May 09 22:06:20 GMT 2015]; root of context hierarchy
|
||||
2015-05-09 22:06:21.511 INFO 5 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup
|
||||
2015-05-09 22:06:21.549 INFO 5 --- [ main] c.g.p.h.HazelcastDiscoveryController : Asking k8s registry at https://kubernetes.default.cluster.local..
|
||||
2015-05-09 22:06:22.031 INFO 5 --- [ main] c.g.p.h.HazelcastDiscoveryController : Found 2 pods running Hazelcast.
|
||||
2015-05-09 22:06:22.176 INFO 5 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.4.2] Interfaces is disabled, trying to pick one address from TCP-IP config addresses: [10.244.90.3, 10.244.66.2]
|
||||
2015-05-09 22:06:22.177 INFO 5 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.4.2] Prefer IPv4 stack is true.
|
||||
2015-05-09 22:06:22.189 INFO 5 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.4.2] Picked Address[10.244.66.2]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is true
|
||||
2015-05-09 22:06:22.642 INFO 5 --- [ main] com.hazelcast.spi.OperationService : [10.244.66.2]:5701 [someGroup] [3.4.2] Backpressure is disabled
|
||||
2015-05-09 22:06:22.647 INFO 5 --- [ main] c.h.spi.impl.BasicOperationScheduler : [10.244.66.2]:5701 [someGroup] [3.4.2] Starting with 2 generic operation threads and 2 partition operation threads.
|
||||
2015-05-09 22:06:22.796 INFO 5 --- [ main] com.hazelcast.system : [10.244.66.2]:5701 [someGroup] [3.4.2] Hazelcast 3.4.2 (20150326 - f6349a4) starting at Address[10.244.66.2]:5701
|
||||
2015-05-09 22:06:22.798 INFO 5 --- [ main] com.hazelcast.system : [10.244.66.2]:5701 [someGroup] [3.4.2] Copyright (C) 2008-2014 Hazelcast.com
|
||||
2015-05-09 22:06:22.800 INFO 5 --- [ main] com.hazelcast.instance.Node : [10.244.66.2]:5701 [someGroup] [3.4.2] Creating TcpIpJoiner
|
||||
2015-05-09 22:06:22.801 INFO 5 --- [ main] com.hazelcast.core.LifecycleService : [10.244.66.2]:5701 [someGroup] [3.4.2] Address[10.244.66.2]:5701 is STARTING
|
||||
2015-05-09 22:06:23.108 INFO 5 --- [cached.thread-2] com.hazelcast.nio.tcp.SocketConnector : [10.244.66.2]:5701 [someGroup] [3.4.2] Connecting to /10.244.90.3:5701, timeout: 0, bind-any: true
|
||||
2015-05-09 22:06:23.182 INFO 5 --- [cached.thread-2] c.h.nio.tcp.TcpIpConnectionManager : [10.244.66.2]:5701 [someGroup] [3.4.2] Established socket connection between /10.244.66.2:48051 and 10.244.90.3/10.244.90.3:5701
|
||||
2015-05-09 22:06:29.158 INFO 5 --- [ration.thread-1] com.hazelcast.cluster.ClusterService : [10.244.66.2]:5701 [someGroup] [3.4.2]
|
||||
$ kubectl log hazelcast-nanfb hazelcast
|
||||
2015-07-10 13:26:34.443 INFO 5 --- [ main] com.github.pires.hazelcast.Application : Starting Application on hazelcast-nanfb with PID 5 (/bootstrapper.jar started by root in /)
|
||||
2015-07-10 13:26:34.535 INFO 5 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@42cfcf1: startup date [Fri Jul 10 13:26:34 GMT 2015]; root of context hierarchy
|
||||
2015-07-10 13:26:35.888 INFO 5 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup
|
||||
2015-07-10 13:26:35.924 INFO 5 --- [ main] c.g.p.h.HazelcastDiscoveryController : Asking k8s registry at https://kubernetes.default.svc.cluster.local..
|
||||
2015-07-10 13:26:37.259 INFO 5 --- [ main] c.g.p.h.HazelcastDiscoveryController : Found 2 pods running Hazelcast.
|
||||
2015-07-10 13:26:37.404 INFO 5 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.5] Interfaces is disabled, trying to pick one address from TCP-IP config addresses: [10.244.77.3, 10.244.37.3]
|
||||
2015-07-10 13:26:37.405 INFO 5 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.5] Prefer IPv4 stack is true.
|
||||
2015-07-10 13:26:37.415 INFO 5 --- [ main] c.h.instance.DefaultAddressPicker : [LOCAL] [someGroup] [3.5] Picked Address[10.244.77.3]:5701, using socket ServerSocket[addr=/0:0:0:0:0:0:0:0,localport=5701], bind any local is true
|
||||
2015-07-10 13:26:37.852 INFO 5 --- [ main] com.hazelcast.spi.OperationService : [10.244.77.3]:5701 [someGroup] [3.5] Backpressure is disabled
|
||||
2015-07-10 13:26:37.879 INFO 5 --- [ main] c.h.s.i.o.c.ClassicOperationExecutor : [10.244.77.3]:5701 [someGroup] [3.5] Starting with 2 generic operation threads and 2 partition operation threads.
|
||||
2015-07-10 13:26:38.531 INFO 5 --- [ main] com.hazelcast.system : [10.244.77.3]:5701 [someGroup] [3.5] Hazelcast 3.5 (20150617 - 4270dc6) starting at Address[10.244.77.3]:5701
|
||||
2015-07-10 13:26:38.532 INFO 5 --- [ main] com.hazelcast.system : [10.244.77.3]:5701 [someGroup] [3.5] Copyright (c) 2008-2015, Hazelcast, Inc. All Rights Reserved.
|
||||
2015-07-10 13:26:38.533 INFO 5 --- [ main] com.hazelcast.instance.Node : [10.244.77.3]:5701 [someGroup] [3.5] Creating TcpIpJoiner
|
||||
2015-07-10 13:26:38.534 INFO 5 --- [ main] com.hazelcast.core.LifecycleService : [10.244.77.3]:5701 [someGroup] [3.5] Address[10.244.77.3]:5701 is STARTING
|
||||
2015-07-10 13:26:38.672 INFO 5 --- [ cached1] com.hazelcast.nio.tcp.SocketConnector : [10.244.77.3]:5701 [someGroup] [3.5] Connecting to /10.244.37.3:5701, timeout: 0, bind-any: true
|
||||
2015-07-10 13:26:38.683 INFO 5 --- [ cached1] c.h.nio.tcp.TcpIpConnectionManager : [10.244.77.3]:5701 [someGroup] [3.5] Established socket connection between /10.244.77.3:59951
|
||||
2015-07-10 13:26:45.699 INFO 5 --- [ration.thread-1] com.hazelcast.cluster.ClusterService : [10.244.77.3]:5701 [someGroup] [3.5]
|
||||
|
||||
Members [2] {
|
||||
Member [10.244.90.3]:5701
|
||||
Member [10.244.66.2]:5701 this
|
||||
Member [10.244.37.3]:5701
|
||||
Member [10.244.77.3]:5701 this
|
||||
}
|
||||
|
||||
2015-05-09 22:06:31.177 INFO 5 --- [ main] com.hazelcast.core.LifecycleService : [10.244.66.2]:5701 [someGroup] [3.4.2] Address[10.244.66.2]:5701 is STARTED
|
||||
2015-07-10 13:26:47.722 INFO 5 --- [ main] com.hazelcast.core.LifecycleService : [10.244.77.3]:5701 [someGroup] [3.5] Address[10.244.77.3]:5701 is STARTED
|
||||
2015-07-10 13:26:47.723 INFO 5 --- [ main] com.github.pires.hazelcast.Application : Started Application in 13.792 seconds (JVM running for 14.542)
|
||||
```
|
||||
|
||||
Now let's scale our cluster to 4 nodes:
|
||||
|
||||
```sh
|
||||
$ kubectl scale rc hazelcast --replicas=4
|
||||
```
|
||||
|
||||
Examine the status again by checking a node’s log and you should see the 4 members connected.
|
||||
Examine the status again by checking the logs and you should see the 4 members connected.
|
||||
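If you don't want to pick a pod by hand, a small shell loop over the matching pods works as well. This is only a sketch, assuming the `name: hazelcast` label and the `hazelcast` container name used throughout this example:

```sh
# Print the cluster membership block reported by every Hazelcast pod.
for pod in $(kubectl get pods -l name=hazelcast | awk 'NR>1 {print $1}'); do
  echo "--- $pod ---"
  kubectl logs "$pod" hazelcast | grep -A 6 "Members"
done
```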
|
||||
### tl; dr;
|
||||
|
||||
For those of you who are impatient, here is the summary of the commands we ran in this tutorial.
|
||||
|
||||
```sh
|
||||
# create a service to track all hazelcast nodes
|
||||
kubectl create -f hazelcast-service.yaml
|
||||
kubectl create -f examples/hazelcast/hazelcast-service.yaml
|
||||
|
||||
# create a replication controller to replicate hazelcast nodes
|
||||
kubectl create -f hazelcast-controller.yaml
|
||||
kubectl create -f examples/hazelcast/hazelcast-controller.yaml
|
||||
|
||||
# scale up to 2 nodes
|
||||
kubectl scale rc hazelcast --replicas=2
|
||||
@@ -206,4 +269,7 @@ kubectl scale rc hazelcast --replicas=4
|
||||
|
||||
See [here](https://github.com/pires/hazelcast-kubernetes-bootstrapper/blob/master/src/main/java/com/github/pires/hazelcast/HazelcastDiscoveryController.java)
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -16,12 +16,16 @@ spec:
|
||||
containers:
|
||||
- resources:
|
||||
limits:
|
||||
cpu: 0.5
|
||||
image: quay.io/pires/hazelcast-kubernetes:0.3.1
|
||||
cpu: 0.1
|
||||
image: quay.io/pires/hazelcast-kubernetes:0.5
|
||||
name: hazelcast
|
||||
env:
|
||||
- name: "DNS_DOMAIN"
|
||||
value: "cluster.local"
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- containerPort: 5701
|
||||
name: hazelcast
|
||||
|
@@ -1,11 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
metadata:
|
||||
labels:
|
||||
name: hazelcast
|
||||
name: hazelcast
|
||||
spec:
|
||||
ports:
|
||||
- port: 5701
|
||||
selector:
|
||||
selector:
|
||||
name: hazelcast
|
||||
|
@@ -5,7 +5,7 @@ MAINTAINER Paulo Pires <pjpires@gmail.com>
|
||||
EXPOSE 5701
|
||||
|
||||
RUN \
|
||||
curl -Lskj https://github.com/pires/hazelcast-kubernetes-bootstrapper/releases/download/0.3.1/hazelcast-kubernetes-bootstrapper-0.3.1.jar \
|
||||
curl -Lskj https://github.com/pires/hazelcast-kubernetes-bootstrapper/releases/download/0.5/hazelcast-kubernetes-bootstrapper-0.5.jar \
|
||||
-o /bootstrapper.jar
|
||||
|
||||
CMD java -jar /bootstrapper.jar
|
46
examples/high-availability/Vagrantfile
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
|
||||
fedora = 'hansode/fedora-21-server-x86_64'
|
||||
script = "provision.sh"
|
||||
config.hostmanager.enabled = true
|
||||
|
||||
if Vagrant.has_plugin?("vagrant-cachier")
|
||||
config.cache.scope = :box
|
||||
end
|
||||
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
v.memory = 3000
|
||||
v.cpus = 1
|
||||
end
|
||||
|
||||
config.vm.define "kube0" do |kube0|
|
||||
kube0.vm.box = fedora
|
||||
kube0.vm.hostname = "kube0.ha"
|
||||
kube0.vm.synced_folder ".", "/vagrant"
|
||||
kube0.vm.network :private_network, ip: "192.168.4.100"
|
||||
kube0.vm.provision "shell", path:script
|
||||
end
|
||||
|
||||
config.vm.define "kube1" do |kube1|
|
||||
kube1.vm.box = fedora
|
||||
kube1.vm.hostname = "kube1.ha"
|
||||
kube1.vm.synced_folder ".", "/vagrant"
|
||||
kube1.vm.network :private_network, ip: "192.168.4.101"
|
||||
kube1.vm.provision "shell", path:script
|
||||
end
|
||||
|
||||
config.vm.define "kube2" do |kube2|
|
||||
kube2.vm.box = fedora
|
||||
kube2.vm.hostname = "kube2.ha"
|
||||
kube2.vm.network :private_network, ip: "192.168.4.102"
|
||||
kube2.vm.synced_folder ".", "/vagrant"
|
||||
kube2.vm.provision "shell", path:script
|
||||
end
|
||||
|
||||
end
|
18
examples/high-availability/apiserver.crt
Normal file
@@ -0,0 +1,18 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIC+zCCAeWgAwIBAgIBATALBgkqhkiG9w0BAQswHzEdMBsGA1UEAwwUMTAuMC4y
|
||||
LjE1QDE0MzQ1NTU1NTkwHhcNMTUwNjE3MTUzOTE5WhcNMTYwNjE2MTUzOTE5WjAf
|
||||
MR0wGwYDVQQDDBQxMC4wLjIuMTVAMTQzNDU1NTU1OTCCASIwDQYJKoZIhvcNAQEB
|
||||
BQADggEPADCCAQoCggEBAK3OFQlqz04iuOtmSIlbJkeTwecL+p8tdtmG9SRn4Fw6
|
||||
TeuuoLCiSqjCZGLV1pKiL6fcjPYWsHoUNIzTtb6E/gj9OfGgQuIqZWRjM3blBmik
|
||||
aZ7N7OwJ5SZy6e5wFtNJ08xRnDZjhOIhtSjPQHk0WsC3hKJav3rGNdh7C53LUiWB
|
||||
uL3ne8oWaiTI9vlgW0ZWx6LcSa0U4jXftwdzLPLbB5INYrz9chF1hpulYnPWY1UA
|
||||
GE6wJTEpQM0p88Ye1t8Ey5QRWp6tjxVfxDYScxSP6FS8Dcj36RF9+5zGYcQ1YbRC
|
||||
Hc1hq7k33H6k5uUp+iPofezG9v4xhWqPkNV6LPxB9k8CAwEAAaNGMEQwDgYDVR0P
|
||||
AQH/BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDwYD
|
||||
VR0RBAgwBocECgACDzALBgkqhkiG9w0BAQsDggEBAFAzOeP67fKtHH2t114lSvvD
|
||||
2wMj7YjSDLyp3xRAMqtAiQ2DXytXvJ0nG8HcI8rnYYx/0RhibpSM565KjMk3VKhV
|
||||
FMYBU5BgFmR84TmCtLeZe4szr817A1Bbr25djMLQgHtEhtA0NptmjrzSdJICXeXe
|
||||
ih29/5HCxELlbDl7Alb8C8ITQlWsVQpUyr2W5tPp2w1wUA5OD1jJAdQquOHG/lWn
|
||||
4JC/4Out213CNCRh9dZFQsIy0oVUIncqspfj7v+xxVmQMeMqu1H92e5NFIqzfKaV
|
||||
cL5lSqZj2tOKS4fKPqadZ6IBxOZVr28THCUlhbWwDrLEMk8Vu7W+iuhrl8Jthws=
|
||||
-----END CERTIFICATE-----
|
27
examples/high-availability/apiserver.key
Normal file
@@ -0,0 +1,27 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEArc4VCWrPTiK462ZIiVsmR5PB5wv6ny122Yb1JGfgXDpN666g
|
||||
sKJKqMJkYtXWkqIvp9yM9hawehQ0jNO1voT+CP058aBC4iplZGMzduUGaKRpns3s
|
||||
7AnlJnLp7nAW00nTzFGcNmOE4iG1KM9AeTRawLeEolq/esY12HsLnctSJYG4ved7
|
||||
yhZqJMj2+WBbRlbHotxJrRTiNd+3B3Ms8tsHkg1ivP1yEXWGm6Vic9ZjVQAYTrAl
|
||||
MSlAzSnzxh7W3wTLlBFanq2PFV/ENhJzFI/oVLwNyPfpEX37nMZhxDVhtEIdzWGr
|
||||
uTfcfqTm5Sn6I+h97Mb2/jGFao+Q1Xos/EH2TwIDAQABAoIBAAN4BXt2okSQ9Nfd
|
||||
5ry3EQCEtm4CEzJyGdkllGbxm8y7bSqzBYoA8zSDDLXcRrgOfJ0BDd8rfMIy/QmC
|
||||
gDIZPWi4hRuJc0xIRFXIlRJeZ85W2bTNr1jWqbgmuvyDSDFXW97MEi4Ttmc8/Pyf
|
||||
hk3k+Zq3DvKelQvCQPjdG9I+aJlPAQ9jRpseiXADgxgGJRjMrNNvDmAXSy+vD/PA
|
||||
MzIPcmW48nQ6kvXs6UdRXmfZD8CySQnSMN+pOMzikN9bbyrPHnKxNzImsKSCpN78
|
||||
Uh8eNItDJmMLWv/SwnVS8/X5wMxRQ2t5vuGRnWCQEc+rLtw2mAkj36GnfFpZvSNd
|
||||
fVuVbCECgYEAygCErUVxFUgfBgrXDZa9VEiYKnuQBdrgKOH5JQ/Pifp9BuhNtLvH
|
||||
fHZ16cesZnQ8q+U4nUbnu0s4Gpl0RS96KcwJ3EjGPknclZoVZVPB6Ece/7JaaZcA
|
||||
OQuRRkYABJRPIcpPCtgeJO+OL6H3BFmvteT8GTrek6RY8wrw65nIPu0CgYEA3EP5
|
||||
guS3OoclBvFs5o2WyKokc+kq+L0zS9WX/Hv4nK2c2aS628TfhDqWeTeettuq0Jlf
|
||||
hGvNkNaehQfPpyrJzHwoATMWhi/cKM9sycC9oEFX1tuPAZit2gl+cjXZOX19msp6
|
||||
Sh1I5VKGM8pxGFrE3gDDq1woRr+Ke+lWOiDz5qsCgYBMhSm9YYLW5v0pks2oTiPm
|
||||
W6GY5jnGngUwN3njujuKLyNRjIpzHncRBObh6ku6e+nHzAtIOOXrozDbkqni03tZ
|
||||
fft2QPMoAV7YJQhZ3AKmdNqfTfbF7PeepG0hy76R/YSEbljG6NtybnTUQmyKb1cK
|
||||
dnWxMQXDtAwl9U0SUqzyeQKBgGANWGpHGMvyESiE8WtcxSs+XuUZAf80aoEgZMXa
|
||||
veB9KRAT8NRTdvEvp1y274RoKIYMzAVwCVWm8burW1dXpmaAoeVcBO+BQW2efp9A
|
||||
aLDQreBpIGSe0vlo+HYPm2mhmrt8nnVhbd9q7FD7b/Qh6QWyqaE5b+riLh648zwo
|
||||
EJQ/AoGBAKpDzePHNptD8zZCi/LEjPGeI2HPSiDs7/s6591l5gbSvfRtWyyRtDk3
|
||||
jRgbOAqjF3Eow+QOA1GNGaSYWoANBmhKuUwn3ETzsmQ8UFSj/Wmc3IohhYZtrh6h
|
||||
e0T8VGFcS6bg5OLbYfarzdaI+hL7zlOhjDAgc9E8rjYgBIvb8h9n
|
||||
-----END RSA PRIVATE KEY-----
|
21
examples/high-availability/etc_kubernetes_kubelet
Normal file
@@ -0,0 +1,21 @@
|
||||
###
|
||||
# kubernetes kubelet config
|
||||
|
||||
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
|
||||
KUBELET_ADDRESS=""
|
||||
#--address=0.0.0.0"
|
||||
|
||||
# The port for the info server to serve on
|
||||
# KUBELET_PORT="--port=10250"
|
||||
|
||||
# You may leave this blank to use the actual hostname
|
||||
# KUBELET_HOSTNAME="--hostname_override=0.0.0."
|
||||
|
||||
# location of the api-server
|
||||
KUBELET_API_SERVER="--api_servers=http://0.0.0.0:8080,kube1.ha:8080,kube0.ha:8080 "
|
||||
# --cert-dir="/var/run/kubernetes": The directory where the TLS certs are located (by default /var/run/kubernetes). If --tls_cert_file and --tls_private_key_file are provided, this flag will be ignored.
|
||||
# --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If --tls_cert_file and --tls_private_key_file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to --cert_dir.
|
||||
# --tls-private-key-file="": File containing x509 private key matching --tls_cert_file.
|
||||
|
||||
# We modify kubelet args to do verbose logs + read from manifests dir.
|
||||
KUBELET_ARGS="--tls-cert-file=/vagrant/apiserver.crt --tls-private-key-file=/vagrant/apiserver.key --register-node=true --v=5 --config=/etc/kubernetes/manifests --kubeconfig=/vagrant/kubeconfig"
|
97
examples/high-availability/etcd.manifest
Normal file
@@ -0,0 +1,97 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {"name":"etcd-server"},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "etcd-container",
|
||||
"image": "quay.io/coreos/etcd",
|
||||
"command": [
|
||||
"/etcd","--name","NODE_NAME",
|
||||
"--initial-advertise-peer-urls", "http://NODE_IP:2380",
|
||||
"--listen-peer-urls", "http://NODE_IP:2380",
|
||||
"--advertise-client-urls", "http://NODE_IP:2379",
|
||||
"-initial-cluster", "kube0.ha=http://192.168.4.100:2380",
|
||||
"--listen-client-urls", "http://127.0.0.1:2379,http://NODE_IP:2379",
|
||||
"--data-dir","/var/etcd/data"
|
||||
],
|
||||
"ports":[
|
||||
{
|
||||
"name": "serverport", "containerPort": 2380, "hostPort": 2380
|
||||
},
|
||||
{
|
||||
"name": "clientport","containerPort": 4001, "hostPort": 4001
|
||||
}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "varetcd",
|
||||
"mountPath": "/var/etcd",
|
||||
"readOnly": false},
|
||||
{ "name": "etcssl",
|
||||
"mountPath": "/etc/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrsharessl",
|
||||
"mountPath": "/usr/share/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "varssl",
|
||||
"mountPath": "/var/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrssl",
|
||||
"mountPath": "/usr/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrlibssl",
|
||||
"mountPath": "/usr/lib/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrlocalopenssl",
|
||||
"mountPath": "/usr/local/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcopenssl",
|
||||
"mountPath": "/etc/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcpkitls",
|
||||
"mountPath": "/etc/pki/tls",
|
||||
"readOnly": true}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "varetcd",
|
||||
"hostPath": {
|
||||
"path": "/var/etcd/data"}
|
||||
},
|
||||
{ "name": "etcssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/pki/tls/certs"}
|
||||
},
|
||||
{ "name": "usrsharessl",
|
||||
"hostPath": {
|
||||
"path": "/usr/share/ssl"}
|
||||
},
|
||||
{ "name": "varssl",
|
||||
"hostPath": {
|
||||
"path": "/var/ssl"}
|
||||
},
|
||||
{ "name": "usrssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/ssl"}
|
||||
},
|
||||
{ "name": "usrlibssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/lib/ssl"}
|
||||
},
|
||||
{ "name": "usrlocalopenssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/local/openssl"}
|
||||
},
|
||||
{ "name": "etcopenssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/openssl"}
|
||||
},
|
||||
{ "name": "etcpkitls",
|
||||
"hostPath": {
|
||||
"path": "/etc/pki/tls"}
|
||||
}
|
||||
]
|
||||
}}
|
103
examples/high-availability/kube-apiserver.manifest
Normal file
@@ -0,0 +1,103 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {"name":"kube-apiserver"},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "kube-apiserver",
|
||||
"image": "gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02",
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/usr/local/bin/kube-apiserver --address=0.0.0.0 --etcd_servers=http://kube0.ha:2379 --service-cluster-ip-range=10.0.0.0/16 --v=4 --allow_privileged=True 1>>/var/log/kube-apiserver.log 2>&1"
|
||||
],
|
||||
"ports":[
|
||||
{ "name": "https",
|
||||
"containerPort": 443,
|
||||
"hostPort": 443},{
|
||||
"name": "http",
|
||||
"containerPort": 7080,
|
||||
"hostPort": 7080},{
|
||||
"name": "local",
|
||||
"containerPort": 8080,
|
||||
"hostPort": 8080}
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "srvkube",
|
||||
"mountPath": "/srv/kubernetes",
|
||||
"readOnly": true},
|
||||
{ "name": "logfile",
|
||||
"mountPath": "/var/log/kube-apiserver.log",
|
||||
"readOnly": false},
|
||||
{ "name": "etcssl",
|
||||
"mountPath": "/etc/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrsharessl",
|
||||
"mountPath": "/usr/share/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "varssl",
|
||||
"mountPath": "/var/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrssl",
|
||||
"mountPath": "/usr/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrlibssl",
|
||||
"mountPath": "/usr/lib/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrlocalopenssl",
|
||||
"mountPath": "/usr/local/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcopenssl",
|
||||
"mountPath": "/etc/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcpkitls",
|
||||
"mountPath": "/etc/pki/tls",
|
||||
"readOnly": true}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "srvkube",
|
||||
"hostPath": {
|
||||
"path": "/srv/kubernetes"}
|
||||
},
|
||||
{ "name": "logfile",
|
||||
"hostPath": {
|
||||
"path": "/var/log/kube-apiserver.log"}
|
||||
},
|
||||
{ "name": "etcssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/ssl"}
|
||||
},
|
||||
{ "name": "usrsharessl",
|
||||
"hostPath": {
|
||||
"path": "/usr/share/ssl"}
|
||||
},
|
||||
{ "name": "varssl",
|
||||
"hostPath": {
|
||||
"path": "/var/ssl"}
|
||||
},
|
||||
{ "name": "usrssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/ssl"}
|
||||
},
|
||||
{ "name": "usrlibssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/lib/ssl"}
|
||||
},
|
||||
{ "name": "usrlocalopenssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/local/openssl"}
|
||||
},
|
||||
{ "name": "etcopenssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/openssl"}
|
||||
},
|
||||
{ "name": "etcpkitls",
|
||||
"hostPath": {
|
||||
"path": "/etc/pki/tls"}
|
||||
}
|
||||
]
|
||||
}}
|
100
examples/high-availability/kube-controller-manager.manifest
Normal file
@@ -0,0 +1,100 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {"name":"kube-controller-manager"},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "kube-controller-manager",
|
||||
"image": "gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793",
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/usr/local/bin/kube-controller-manager --master=192.168.4.102:8080 --service_account_private_key_file=/srv/kubernetes/server.key --v=4 1>>/var/log/kube-controller-manager.log 2>&1"
|
||||
],
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"path": "/healthz",
|
||||
"port": 10252
|
||||
},
|
||||
"initialDelaySeconds": 15,
|
||||
"timeoutSeconds": 1
|
||||
},
|
||||
"volumeMounts": [
|
||||
{ "name": "srvkube",
|
||||
"mountPath": "/srv/kubernetes",
|
||||
"readOnly": true},
|
||||
{ "name": "logfile",
|
||||
"mountPath": "/var/log/kube-controller-manager.log",
|
||||
"readOnly": false},
|
||||
{ "name": "etcssl",
|
||||
"mountPath": "/etc/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrsharessl",
|
||||
"mountPath": "/usr/share/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "varssl",
|
||||
"mountPath": "/var/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrssl",
|
||||
"mountPath": "/usr/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrlibssl",
|
||||
"mountPath": "/usr/lib/ssl",
|
||||
"readOnly": true},
|
||||
{ "name": "usrlocalopenssl",
|
||||
"mountPath": "/usr/local/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcopenssl",
|
||||
"mountPath": "/etc/openssl",
|
||||
"readOnly": true},
|
||||
{ "name": "etcpkitls",
|
||||
"mountPath": "/etc/pki/tls",
|
||||
"readOnly": true}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "srvkube",
|
||||
"hostPath": {
|
||||
"path": "/srv/kubernetes"}
|
||||
},
|
||||
{ "name": "logfile",
|
||||
"hostPath": {
|
||||
"path": "/var/log/kube-controller-manager.log"}
|
||||
},
|
||||
{ "name": "etcssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/ssl"}
|
||||
},
|
||||
{ "name": "usrsharessl",
|
||||
"hostPath": {
|
||||
"path": "/usr/share/ssl"}
|
||||
},
|
||||
{ "name": "varssl",
|
||||
"hostPath": {
|
||||
"path": "/var/ssl"}
|
||||
},
|
||||
{ "name": "usrssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/ssl"}
|
||||
},
|
||||
{ "name": "usrlibssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/lib/ssl"}
|
||||
},
|
||||
{ "name": "usrlocalopenssl",
|
||||
"hostPath": {
|
||||
"path": "/usr/local/openssl"}
|
||||
},
|
||||
{ "name": "etcopenssl",
|
||||
"hostPath": {
|
||||
"path": "/etc/openssl"}
|
||||
},
|
||||
{ "name": "etcpkitls",
|
||||
"hostPath": {
|
||||
"path": "/etc/pki/tls"}
|
||||
}
|
||||
]
|
||||
}}
|
39
examples/high-availability/kube-scheduler.manifest
Normal file
@@ -0,0 +1,39 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {"name":"kube-scheduler"},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "kube-scheduler",
|
||||
"image": "gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9",
|
||||
"command": [
|
||||
"/bin/sh",
|
||||
"-c",
|
||||
"/usr/local/bin/kube-scheduler --master=127.0.0.1:8080 --v=2 1>>/var/log/kube-scheduler.log 2>&1"
|
||||
],
|
||||
"livenessProbe": {
|
||||
"httpGet": {
|
||||
"path": "/healthz",
|
||||
"port": 10251
|
||||
},
|
||||
"initialDelaySeconds": 15,
|
||||
"timeoutSeconds": 1
|
||||
},
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "logfile",
|
||||
"mountPath": "/var/log/kube-scheduler.log",
|
||||
"readOnly": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "logfile",
|
||||
"hostPath": {
|
||||
"path": "/var/log/kube-scheduler.log"}
|
||||
}
|
||||
]
|
||||
}}
|
2
examples/high-availability/kubeconfig
Executable file
@@ -0,0 +1,2 @@
|
||||
{
|
||||
}
|
57
examples/high-availability/podmaster.json
Normal file
@@ -0,0 +1,57 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {"name":"scheduler-master"},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "scheduler-elector",
|
||||
"image": "gcr.io/google_containers/podmaster:1.1",
|
||||
"command": [
|
||||
"/podmaster",
|
||||
"--etcd-servers=http://192.168.4.100:2379,http://192.168.4.101:2379,http://192.168.4.102:2379",
|
||||
"--key=scheduler",
|
||||
"--source-file=/kubernetes/kube-scheduler.manifest",
|
||||
"--dest-file=/manifests/kube-scheduler.manifest"
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "k8s",
|
||||
"mountPath": "/kubernetes",
|
||||
"readOnly": true},
|
||||
{ "name": "manifests",
|
||||
"mountPath": "/manifests",
|
||||
"readOnly": false}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "controller-manager-elector",
|
||||
"image": "gcr.io/google_containers/podmaster:1.1",
|
||||
"command": [
|
||||
"/podmaster",
|
||||
"--etcd-servers=http://192.168.4.101:2379,http://192.168.4.102:2379,http://192.168.4.100:2379",
|
||||
"--key=controller",
|
||||
"--source-file=/kubernetes/kube-controller-manager.manifest",
|
||||
"--dest-file=/manifests/kube-controller-manager.manifest"
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "k8s",
|
||||
"mountPath": "/kubernetes",
|
||||
"readOnly": true},
|
||||
{ "name": "manifests",
|
||||
"mountPath": "/manifests",
|
||||
"readOnly": false}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "k8s",
|
||||
"hostPath": {
|
||||
"path": "/srv/kubernetes"}
|
||||
},
|
||||
{ "name": "manifests",
|
||||
"hostPath": {
|
||||
"path": "/etc/kubernetes/manifests"}
|
||||
}
|
||||
]
|
||||
}}
|
57
examples/high-availability/podmaster.manifest
Normal file
@@ -0,0 +1,57 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {"name":"scheduler-master"},
|
||||
"spec":{
|
||||
"hostNetwork": true,
|
||||
"containers":[
|
||||
{
|
||||
"name": "scheduler-elector",
|
||||
"image": "gcr.io/google_containers/podmaster:1.1",
|
||||
"command": [
|
||||
"/podmaster",
|
||||
"--etcd-servers=http://127.0.0.1:4001",
|
||||
"--key=scheduler",
|
||||
"--source-file=/kubernetes/kube-scheduler.manifest",
|
||||
"--dest-file=/manifests/kube-scheduler.manifest"
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "k8s",
|
||||
"mountPath": "/kubernetes",
|
||||
"readOnly": true},
|
||||
{ "name": "manifests",
|
||||
"mountPath": "/manifests",
|
||||
"readOnly": false}
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "controller-manager-elector",
|
||||
"image": "gcr.io/google_containers/podmaster:1.1",
|
||||
"command": [
|
||||
"/podmaster",
|
||||
"--etcd-servers=http://127.0.0.1:4001",
|
||||
"--key=controller",
|
||||
"--source-file=/kubernetes/kube-controller-manager.manifest",
|
||||
"--dest-file=/manifests/kube-controller-manager.manifest"
|
||||
],
|
||||
"volumeMounts": [
|
||||
{ "name": "k8s",
|
||||
"mountPath": "/kubernetes",
|
||||
"readOnly": true},
|
||||
{ "name": "manifests",
|
||||
"mountPath": "/manifests",
|
||||
"readOnly": false}
|
||||
]
|
||||
}
|
||||
],
|
||||
"volumes":[
|
||||
{ "name": "k8s",
|
||||
"hostPath": {
|
||||
"path": "/srv/kubernetes"}
|
||||
},
|
||||
{ "name": "manifests",
|
||||
"hostPath": {
|
||||
"path": "/etc/kubernetes/manifests"}
|
||||
}
|
||||
]
|
||||
}}
|
44
examples/high-availability/provision-flannel.sh
Executable file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
function setup_flannel {
|
||||
yum install -y flannel
|
||||
|
||||
### Write this k/v to etcd. Flannel will grab it to set up its networking.
|
||||
curl --silent -s -L http://kube0.ha:2379/v2/keys/coreos.com/network/config -XPUT -d value='{"Network": "172.31.255.0/24", "SubnetLen": 27, "Backend": {"Type": "vxlan"}}'
|
||||
|
||||
### Write flannel etcd file
|
||||
cat >> /etc/sysconfig/flanneld << EOF
|
||||
FLANNEL_ETCD="http://kube0.ha:2379"
|
||||
FLANNEL_ETCD_KEY="/coreos.com/network"
|
||||
FLANNEL_OPTIONS="--iface=eth1"
|
||||
EOF
|
||||
}
|
||||
|
||||
echo "now setting up flannel. Assuming etcd is online!"
|
||||
setup_flannel
|
||||
sudo service flanneld restart
|
||||
sudo ip link delete docker0
|
||||
sudo service docker restart
|
||||
|
||||
### This should restart etcd and all the others
|
||||
### The pods will now have a default ip for docker containers which
|
||||
### runs inside of the kube network.
|
||||
sudo systemctl restart kubelet
|
181
examples/high-availability/provision.sh
Executable file
@@ -0,0 +1,181 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
set -x
|
||||
|
||||
# Grep the whole IP because otherwise sometimes vagrant attaches extra mystery dynamic IPs to eth1.
|
||||
IP=`ip -o addr | grep '192.168.4' | cut -d' ' -f 7 | cut -d'/' -f 1`
|
||||
|
||||
echo "Using IP $IP for this machine."
|
||||
|
||||
function initialize {
|
||||
systemctl disable iptables-services firewalld
|
||||
echo "disabling selinux"
|
||||
(setenforce 0 || echo "selinux might already be disabled...")
|
||||
yum install -y docker
|
||||
|
||||
### Important : The kube version MUST match the containers in the manifests.
|
||||
### Otherwise lots of API errors.
|
||||
yum install -y http://cbs.centos.org/kojifiles/packages/kubernetes/0.17.1/3.el7/x86_64/kubernetes-node-0.17.1-3.el7.x86_64.rpm
|
||||
mkdir -p -m 777 /etc/kubernetes/manifests
|
||||
### just to make it easy to hack around as non root user
|
||||
groupadd docker
|
||||
gpasswd -a vagrant docker
|
||||
systemctl restart docker
|
||||
}
|
||||
|
||||
function start_kubelet {
|
||||
systemctl enable docker
|
||||
### We need a custom unit file with the --config/ option
|
||||
cp /vagrant/etc_kubernetes_kubelet /etc/kubernetes/kubelet
|
||||
systemctl enable kubelet
|
||||
### Not sure why, but this restart is required?
|
||||
sleep 2
|
||||
systemctl restart kubelet
|
||||
}
|
||||
|
||||
### Not the best idea if using flannel. Because of the circular dependency.
|
||||
function write_etcd_manifest {
|
||||
|
||||
### I know this looks fancy, but
|
||||
### Basically, this is just setting up ETCD config file w/ IP Addresses
|
||||
cat /vagrant/etcd.manifest | \
|
||||
sed "s/NODE_NAME/`hostname`/g" | \
|
||||
sed "s/NODE_IP/$IP/g" > /etc/kubernetes/manifests/etcd.manifest
|
||||
}
|
||||
|
||||
### Test of ETCD Members.
|
||||
|
||||
function test_etcd {
|
||||
|
||||
echo "----------- DEBUG ------------ KUBELET LOGS -----------------"
|
||||
( journalctl -u kubelet | grep -A 20 -B 20 Fail || echo "no failure in logs")
|
||||
echo "----------- END DEBUG OF KUBELET ----------------------------"
|
||||
|
||||
( curl http://kube0.ha:2379 > /tmp/curl_output || echo "failed etcd!!!" )
|
||||
if [ -s /tmp/curl_output ]; then
|
||||
echo "etcd success"
|
||||
else
|
||||
echo "etcd failure. exit!"
|
||||
exit 100
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
function k8petstore {
|
||||
### run K8petstore. Should work perfectly IFF flannel and so on is set up properly.
|
||||
wget https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/release-0.17/examples/k8petstore/k8petstore.sh
|
||||
chmod 777 k8petstore.sh
|
||||
./k8petstore.sh
|
||||
}
|
||||
|
||||
function write_api_server_config {
|
||||
touch /var/log/kube-apiserver.log
|
||||
mkdir -p -m 777 /srv/kubernetes/
|
||||
|
||||
### We will move files back and forth between the /srv/kube.. directory.
|
||||
### That is how we modulate the leader. Each node will continuously either
|
||||
### ensure that the manifests are in this dir, or else, are in the kubelet manifest dir.
|
||||
cp /vagrant/kube-scheduler.manifest /vagrant/kube-controller-manager.manifest /srv/kubernetes
|
||||
|
||||
### All nodes will run an API Server. This is because the API Server is stateless, so it's not a problem
|
||||
### To serve it up everywhere.
|
||||
cp /vagrant/kube-apiserver.manifest /etc/kubernetes/manifests/
|
||||
}
|
||||
|
||||
function write_podmaster_config {
|
||||
touch /var/log/kube-scheduler.log
|
||||
touch /var/log/kube-controller-manager.log
|
||||
|
||||
### These DO NOT go in manifest. Instead, we mount them here.
|
||||
### We let podmaster swap these in and out of the manifests directory
|
||||
### based on its own internal HA logic.
|
||||
cp /vagrant/kube-controller-manager.manifest /srv/kubernetes/
|
||||
cp /vagrant/kube-scheduler.manifest /srv/kubernetes/
|
||||
|
||||
#### Finally, the podmaster is the mechanism for election
|
||||
cp /vagrant/podmaster.json /etc/kubernetes/manifests/
|
||||
}
|
||||
|
||||
function poll {
|
||||
### wait 10 minutes for kube-apiserver to come online
|
||||
for i in `seq 1 600`
|
||||
do
|
||||
sleep 2
|
||||
echo $i
|
||||
### Just testing that the front end comes up. Not sure how to test total entries etc... (yet)
|
||||
( curl "localhost:8080" > result || echo "failed on attempt $i, retrying again.. api not up yet. " )
|
||||
( cat result || echo "no result" )
|
||||
if ( cat result | grep -q api ) ; then
|
||||
break
|
||||
else
|
||||
echo "continue.."
|
||||
fi
|
||||
done
|
||||
if [ $i == 600 ]; then
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
function install_components {
|
||||
### etcd node - this node only runs etcd in a kubelet, no flannel.
|
||||
### we don't want a circular dependency of docker -> flannel -> etcd -> docker
|
||||
if [ "`hostname`" == "kube0.ha" ]; then
|
||||
write_etcd_manifest
|
||||
start_kubelet
|
||||
|
||||
### precaution to make sure etcd is writable, flush iptables.
|
||||
iptables -F
|
||||
### nodes: these will each run their own api server.
|
||||
else
|
||||
### Make sure etcd running, flannel needs it.
|
||||
test_etcd
|
||||
start_kubelet
|
||||
|
||||
### Flannel setup...
|
||||
### This will restart the kubelet and docker and so on...
|
||||
/vagrant/provision-flannel.sh
|
||||
|
||||
echo "Now pulling down flannel nodes. "
|
||||
curl -L http://kube0.ha:2379/v2/keys/coreos.com/network/subnets | python -mjson.tool
|
||||
|
||||
echo " Inspect the above lines carefully ^."
|
||||
### All nodes run api server
|
||||
write_api_server_config
|
||||
|
||||
### controller-manager will turn on and off
|
||||
### and same for kube-scheduler
|
||||
write_podmaster_config
|
||||
|
||||
# finally, for us to create public IPs for k8petstore etc., we need the proxy running.
|
||||
service kube-proxy start
|
||||
service kube-proxy status
|
||||
fi
|
||||
}
|
||||
|
||||
initialize
|
||||
install_components
|
||||
iptables -F
|
||||
|
||||
if [ "`hostname`" == "kube2.ha" ]; then
|
||||
poll
|
||||
k8petstore
|
||||
fi
|
||||
|
||||
echo "ALL DONE!"
|
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014 Google Inc. All rights reserved.
|
||||
# Copyright 2015 The Kubernetes Authors. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@@ -12,6 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM kubernetes/test-webserver
|
||||
COPY html/nautilus.jpg nautilus.jpg
|
||||
COPY html/data.json data.json
|
||||
FROM nginx
|
||||
MAINTAINER Prashanth B <beeps@google.com>
|
||||
COPY default.conf /etc/nginx/conf.d/default.conf
|
||||
CMD ["nginx", "-g", "daemon off;"]
|
24
examples/https-nginx/Makefile
Normal file
@@ -0,0 +1,24 @@
|
||||
all:
|
||||
|
||||
TAG = 1.0
|
||||
PREFIX = bprashanth/nginxhttps
|
||||
KEY = /tmp/nginx.key
|
||||
CERT = /tmp/nginx.crt
|
||||
SECRET = /tmp/secret.json
|
||||
|
||||
keys:
|
||||
# The CName used here is specific to the service specified in nginx-app.yaml.
|
||||
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $(KEY) -out $(CERT) -subj "/CN=nginxsvc/O=nginxsvc"
|
||||
|
||||
secret:
|
||||
CGO_ENABLED=0 GOOS=linux go run -a -installsuffix cgo -ldflags '-w' make_secret.go -crt $(CERT) -key $(KEY) > $(SECRET)
|
||||
|
||||
container:
|
||||
docker build -t $(PREFIX):$(TAG) .
|
||||
|
||||
push: container
|
||||
docker push $(PREFIX):$(TAG)
|
||||
|
||||
clean:
|
||||
rm $(KEY)
|
||||
rm $(CERT)
|
99
examples/https-nginx/README.md
Normal file
@@ -0,0 +1,99 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/https-nginx/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Nginx https service
|
||||
|
||||
This example creates a basic nginx https service useful in verifying proof of concept, keys, secrets, and end-to-end https service creation in kubernetes.
|
||||
It uses an [nginx server block](http://wiki.nginx.org/ServerBlockExample) to serve the index page over both http and https.
|
||||
|
||||
### Generate certificates
|
||||
|
||||
First generate a self-signed RSA key and certificate that the server can use for TLS.
|
||||
|
||||
```sh
|
||||
$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json
|
||||
```
|
||||
|
||||
### Create a https nginx application running in a kubernetes cluster
|
||||
|
||||
You need a [running kubernetes cluster](../../docs/getting-started-guides/) for this to work.
|
||||
|
||||
```
|
||||
$ kubectl create -f /tmp/secret.json
|
||||
secrets/nginxsecret
|
||||
|
||||
$ kubectl create -f examples/https-nginx/nginx-app.yaml
|
||||
services/nginxsvc
|
||||
replicationcontrollers/my-nginx
|
||||
|
||||
$ kubectl get svc nginxsvc -o json
|
||||
...
|
||||
{
|
||||
"name": "http",
|
||||
"protocol": "TCP",
|
||||
"port": 80,
|
||||
"targetPort": 80,
|
||||
"nodePort": 30849
|
||||
},
|
||||
{
|
||||
"name": "https",
|
||||
"protocol": "TCP",
|
||||
"port": 443,
|
||||
"targetPort": 443,
|
||||
"nodePort": 30744
|
||||
}
|
||||
...
|
||||
|
||||
$ kubectl get nodes -o json | grep ExternalIP -A 2
|
||||
...
|
||||
"type": "ExternalIP",
|
||||
"address": "104.197.63.17"
|
||||
}
|
||||
--
|
||||
"type": "ExternalIP",
|
||||
"address": "104.154.89.170"
|
||||
}
|
||||
...
|
||||
|
||||
$ curl https://nodeip:30744 -k
|
||||
...
|
||||
<title>Welcome to nginx!</title>
|
||||
...
|
||||
```
|
||||
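The plain HTTP port is exposed through a NodePort as well (30849 in the output above; yours will differ), so the same page should be reachable without TLS:

```sh
$ curl http://nodeip:30849
```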
|
||||
For more information on how to run this in a kubernetes cluster, please see the [user-guide](../../docs/user-guide/connecting-applications.md).
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
17
examples/https-nginx/default.conf
Normal file
@@ -0,0 +1,17 @@
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server ipv6only=on;
|
||||
|
||||
listen 443 ssl;
|
||||
|
||||
root /usr/share/nginx/html;
|
||||
index index.html index.htm;
|
||||
|
||||
server_name localhost;
|
||||
ssl_certificate /etc/nginx/ssl/nginx.crt;
|
||||
ssl_certificate_key /etc/nginx/ssl/nginx.key;
|
||||
|
||||
location / {
|
||||
try_files $uri $uri/ =404;
|
||||
}
|
||||
}
|
66
examples/https-nginx/make_secret.go
Normal file
@@ -0,0 +1,66 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// A small script that converts the given open ssl public/private keys to
|
||||
// a secret that it writes to stdout as json. Most common use case is to
|
||||
// create a secret from self signed certificates used to authenticate with
|
||||
// a devserver. Usage: go run make_secret.go -crt ca.crt -key priv.key > secret.json
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
)
|
||||
|
||||
// TODO:
|
||||
// Add a -o flag that writes to the specified destination file.
|
||||
// Teach the script to create crt and key if -crt and -key aren't specified.
|
||||
var (
|
||||
crt = flag.String("crt", "", "path to nginx certificates.")
|
||||
key = flag.String("key", "", "path to nginx private key.")
|
||||
)
|
||||
|
||||
func read(file string) []byte {
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
log.Fatalf("Cannot read file %v, %v", file, err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *crt == "" || *key == "" {
|
||||
log.Fatalf("Need to specify -crt -key and -template")
|
||||
}
|
||||
nginxCrt := read(*crt)
|
||||
nginxKey := read(*key)
|
||||
secret := &api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "nginxsecret",
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"nginx.crt": nginxCrt,
|
||||
"nginx.key": nginxKey,
|
||||
},
|
||||
}
|
||||
fmt.Printf(runtime.EncodeOrDie(latest.Codec, secret))
|
||||
}
|
42
examples/https-nginx/nginx-app.yaml
Normal file
@@ -0,0 +1,42 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginxsvc
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- port: 80
|
||||
protocol: TCP
|
||||
name: http
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
name: https
|
||||
selector:
|
||||
app: nginx
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ReplicationController
|
||||
metadata:
|
||||
name: my-nginx
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
volumes:
|
||||
- name: secret-volume
|
||||
secret:
|
||||
secretName: nginxsecret
|
||||
containers:
|
||||
- name: nginxhttps
|
||||
image: bprashanth/nginxhttps:1.0
|
||||
ports:
|
||||
- containerPort: 443
|
||||
- containerPort: 80
|
||||
volumeMounts:
|
||||
- mountPath: /etc/nginx/ssl
|
||||
name: secret-volume
|
@@ -1,4 +1,38 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/iscsi/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Step 1. Setting up iSCSI target and iSCSI initiator
|
||||
|
||||
**Setup A.** On Fedora 21 nodes
|
||||
|
||||
If you use Fedora 21 on the Kubernetes node, then first install the iSCSI initiator on the node:
|
||||
@@ -14,7 +48,8 @@ I mostly followed these [instructions](http://www.server-world.info/en/note?os=F
|
||||
|
||||
GCE does not provide a preconfigured Fedora 21 image, so I set up the iSCSI target on a preconfigured Ubuntu 12.04 image, mostly following these [instructions](http://www.server-world.info/en/note?os=Ubuntu_12.04&p=iscsi). My Kubernetes cluster on GCE was running Debian 7 images, so I followed these [instructions](http://www.server-world.info/en/note?os=Debian_7.0&p=iscsi&f=2) to set up the iSCSI initiator.
|
||||
|
||||
##Step 2. Creating the pod with iSCSI persistent storage
|
||||
## Step 2. Creating the pod with iSCSI persistent storage
|
||||
|
||||
Once you have installed the iSCSI initiator and new Kubernetes, you can create a pod based on my example *iscsi.json*. In the pod JSON, you need to provide the *targetPortal* (the iSCSI target's **IP** address and *port* if not the default port 3260), the target's *iqn*, the *lun*, the type of the filesystem that has been created on the lun, and the *readOnly* boolean.
|
||||
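If you are not sure of the target's *iqn*, you can usually discover it from the initiator host with open-iscsi. This is only a sketch; replace the portal address with your target's IP (and port, if not 3260):

```console
iscsiadm -m discovery -t sendtargets -p <target-ip>:3260
```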
|
||||
**Note:** If you have followed the instructions in the links above you
|
||||
@@ -30,7 +65,7 @@ mkfs.ext4 /dev/<name of device>
|
||||
Once your pod is created, run it on the Kubernetes master:
|
||||
|
||||
```console
|
||||
kubectl create -f your_new_pod.json
|
||||
kubectl create -f ./your_new_pod.json
|
||||
```
|
||||
|
||||
Here is my command and output:
|
||||
@@ -53,6 +88,7 @@ On the Kubernetes node, I got these in mount output
|
||||
```
|
||||
|
||||
If you ssh to that machine, you can run `docker ps` to see the actual pod.
|
||||
|
||||
```console
|
||||
# docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
@@ -61,10 +97,13 @@ cc051196e7af kubernetes/pause:latest "/pause
|
||||
```
|
||||
|
||||
Run *docker inspect* and I found the containers mounted the host directory into their */mnt/iscsipd* directory.
|
||||
|
||||
```console
|
||||
# docker inspect --format '{{index .Volumes "/mnt/iscsipd"}}' cc051196e7af
|
||||
/var/lib/kubelet/pods/75e0af2b-f8e8-11e4-9ae7-42010af01964/volumes/kubernetes.io~iscsi/iscsipd-rw
|
||||
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/k8petstore/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
## Welcome to k8PetStore
|
||||
|
||||
This is a follow-up to the [Guestbook Example](../guestbook/README.md)'s [Go implementation](../guestbook-go/).
|
||||
@@ -94,7 +127,7 @@ So, to run this app in Kubernetes, simply run [The all in one k8petstore.sh shel
|
||||
|
||||
Note that at the top of the script there are a few self-explanatory parameters to set, among which the Public IPs parameter is where you can check out the web UI (at $PUBLIC_IP:3000), which will show a plot and read-outs of transaction throughput.
|
||||
|
||||
In the meantime, because the public IP will be deprecated in Kubernetes v1, we provide two other scripts, k8petstore-loadbalancer.sh and k8petstore-nodeport.sh. As the names suggest, they rely on LoadBalancer and NodePort respectively. More details can be found [here](https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/services.md#external-services).
|
||||
In the meantime, because the public IP will be deprecated in Kubernetes v1, we provide two other scripts, k8petstore-loadbalancer.sh and k8petstore-nodeport.sh. As the names suggest, they rely on LoadBalancer and NodePort respectively. More details can be found [here](../../docs/user-guide/services.md#external-services).
|
||||
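For example, assuming you are at the repository root and the scripts live in the k8petstore example directory, the NodePort variant can be run like this:

```sh
$ ./examples/k8petstore/k8petstore-nodeport.sh
```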
|
||||
## Future
|
||||
|
||||
@@ -109,4 +142,6 @@ For questions on running this app, you can ask on the google containers group (f
|
||||
For questions about bigpetstore, and how the data is generated, ask on the apache bigtop mailing list.
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/k8petstore/bps-data-generator/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# How to generate the bps-data-generator container #
|
||||
|
||||
This container is maintained as part of the apache bigtop project.
|
||||
@@ -15,4 +48,6 @@ then, cd to bigtop-bigpetstore/bigpetstore-transaction-queue, and run the docker
|
||||
`docker build -t jayunit100/bps-transaction-queue .`.
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -6,13 +6,13 @@
|
||||
|
||||
Now start a local redis instance
|
||||
|
||||
```
|
||||
```sh
|
||||
redis-server
|
||||
```
|
||||
|
||||
And run the app
|
||||
|
||||
```
|
||||
```sh
|
||||
export GOPATH=~/Development/k8hacking/k8petstore/web-server/
|
||||
cd $GOPATH/src/main/
|
||||
## Now, you're in the local dir to run the app. Go get its dependencies.
|
||||
|
@@ -1,3 +1,35 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/kubectl-container/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
This directory contains a Dockerfile and Makefile for packaging up kubectl into
|
||||
a container.
|
||||
|
||||
@@ -18,4 +50,6 @@ a drop-in replacement for the old no-auth KUBERNETES_RO service. The other
|
||||
containers in your pod will find the proxy apparently serving on localhost.
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,245 +0,0 @@
|
||||
## Kubernetes Namespaces
|
||||
|
||||
Kubernetes _[namespaces](../../docs/namespaces.md)_ help different projects, teams, or customers to share a Kubernetes cluster.
|
||||
|
||||
It does this by providing the following:
|
||||
|
||||
1. A scope for [Names](../../docs/identifiers.md).
|
||||
2. A mechanism to attach authorization and policy to a subsection of the cluster.
|
||||
|
||||
Use of multiple namespaces is optional.
|
||||
|
||||
This example demonstrates how to use Kubernetes namespaces to subdivide your cluster.
|
||||
|
||||
### Step Zero: Prerequisites
|
||||
|
||||
This example assumes the following:
|
||||
|
||||
1. You have an [existing Kubernetes cluster](../../docs/getting-started-guides).
|
||||
2. You have a basic understanding of Kubernetes _[pods](../../docs/pods.md)_, _[services](../../docs/services.md)_, and _[replication controllers](../../docs/replication-controller.md)_.
|
||||
|
||||
### Step One: Understand the default namespace
|
||||
|
||||
By default, a Kubernetes cluster will instantiate a default namespace when provisioning the cluster to hold the default set of pods,
|
||||
services, and replication controllers used by the cluster.
|
||||
|
||||
Assuming you have a fresh cluster, you can inspect the available namespaces by doing the following:
|
||||
|
||||
```shell
|
||||
$ kubectl get namespaces
|
||||
NAME LABELS
|
||||
default <none>
|
||||
```
|
||||
|
||||
### Step Two: Create new namespaces
|
||||
|
||||
For this exercise, we will create two additional Kubernetes namespaces to hold our content.
|
||||
|
||||
Let's imagine a scenario where an organization is using a shared Kubernetes cluster for development and production use cases.
|
||||
|
||||
The development team would like to maintain a space in the cluster where they can get a view on the list of pods, services, and replication-controllers
|
||||
they use to build and run their application. In this space, Kubernetes resources come and go, and the restrictions on who can or cannot modify resources
|
||||
are relaxed to enable agile development.
|
||||
|
||||
The operations team would like to maintain a space in the cluster where they can enforce strict procedures on who can or cannot manipulate the set of
|
||||
pods, services, and replication controllers that run the production site.
|
||||
|
||||
One pattern this organization could follow is to partition the Kubernetes cluster into two namespaces: development and production.
|
||||
|
||||
Let's create two new namespaces to hold our work.
|
||||
|
||||
Use the file [`examples/kubernetes-namespaces/namespace-dev.json`](namespace-dev.json) which describes a development namespace:
|
||||
|
||||
```js
|
||||
{
|
||||
"kind": "Namespace",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "development",
|
||||
"labels": {
|
||||
"name": "development"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Create the development namespace using kubectl.
|
||||
|
||||
```shell
|
||||
$ kubectl create -f examples/kubernetes-namespaces/namespace-dev.json
|
||||
```
|
||||
|
||||
And then let's create the production namespace using kubectl.
|
||||
|
||||
```shell
|
||||
$ kubectl create -f examples/kubernetes-namespaces/namespace-prod.json
|
||||
```
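For reference, the production namespace definition mirrors the development one above. Creating it inline with a heredoc is an equivalent sketch; the checked-in `namespace-prod.json` file remains the canonical source.

```sh
# Equivalent inline creation of the production namespace.
$ cat <<EOF | kubectl create -f -
{
  "kind": "Namespace",
  "apiVersion": "v1",
  "metadata": {
    "name": "production",
    "labels": {
      "name": "production"
    }
  }
}
EOF
```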
|
||||
|
||||
To be sure things are right, let's list all of the namespaces in our cluster.
|
||||
|
||||
```shell
|
||||
$ kubectl get namespaces
|
||||
NAME LABELS STATUS
|
||||
default <none> Active
|
||||
development name=development Active
|
||||
production name=production Active
|
||||
```
|
||||
|
||||
|
||||
### Step Three: Create pods in each namespace
|
||||
|
||||
A Kubernetes namespace provides the scope for pods, services, and replication controllers in the cluster.
|
||||
|
||||
Users interacting with one namespace do not see the content in another namespace.
|
||||
|
||||
To demonstrate this, let's spin up a simple replication controller and pod in the development namespace.
|
||||
|
||||
First, we check what the current context is with `kubectl config view`:
|
||||
|
||||
```shell
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: REDACTED
|
||||
server: https://130.211.122.180
|
||||
name: lithe-cocoa-92103_kubernetes
|
||||
contexts:
|
||||
- context:
|
||||
cluster: lithe-cocoa-92103_kubernetes
|
||||
user: lithe-cocoa-92103_kubernetes
|
||||
name: lithe-cocoa-92103_kubernetes
|
||||
current-context: lithe-cocoa-92103_kubernetes
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: lithe-cocoa-92103_kubernetes
|
||||
user:
|
||||
client-certificate-data: REDACTED
|
||||
client-key-data: REDACTED
|
||||
token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b
|
||||
- name: lithe-cocoa-92103_kubernetes-basic-auth
|
||||
user:
|
||||
password: h5M0FtUUIflBSdI7
|
||||
username: admin
|
||||
```
|
||||
|
||||
The next step is to define a context for the kubectl client to work in each namespace. The values of the "cluster" and "user" fields are copied from the current context.
|
||||
|
||||
```shell
|
||||
$ kubectl config set-context dev --namespace=development --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes
|
||||
$ kubectl config set-context prod --namespace=production --cluster=lithe-cocoa-92103_kubernetes --user=lithe-cocoa-92103_kubernetes
|
||||
```
|
||||
|
||||
The above commands created two request contexts you can switch between, depending on which namespace you
|
||||
wish to work in.
|
||||
|
||||
Let's switch to operate in the development namespace.
|
||||
|
||||
```shell
|
||||
$ kubectl config use-context dev
|
||||
```
|
||||
|
||||
You can verify your current context by doing the following:
|
||||
|
||||
```shell
|
||||
$ kubectl config view
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: REDACTED
|
||||
server: https://130.211.122.180
|
||||
name: lithe-cocoa-92103_kubernetes
|
||||
contexts:
|
||||
- context:
|
||||
cluster: lithe-cocoa-92103_kubernetes
|
||||
namespace: development
|
||||
user: lithe-cocoa-92103_kubernetes
|
||||
name: dev
|
||||
- context:
|
||||
cluster: lithe-cocoa-92103_kubernetes
|
||||
user: lithe-cocoa-92103_kubernetes
|
||||
name: lithe-cocoa-92103_kubernetes
|
||||
- context:
|
||||
cluster: lithe-cocoa-92103_kubernetes
|
||||
namespace: production
|
||||
user: lithe-cocoa-92103_kubernetes
|
||||
name: prod
|
||||
current-context: dev
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: lithe-cocoa-92103_kubernetes
|
||||
user:
|
||||
client-certificate-data: REDACTED
|
||||
client-key-data: REDACTED
|
||||
token: 65rZW78y8HbwXXtSXuUw9DbP4FLjHi4b
|
||||
- name: lithe-cocoa-92103_kubernetes-basic-auth
|
||||
user:
|
||||
password: h5M0FtUUIflBSdI7
|
||||
username: admin
|
||||
```
|
||||
|
||||
At this point, all requests we make to the Kubernetes cluster from the command line are scoped to the development namespace.
|
||||
|
||||
Let's create some content.
|
||||
|
||||
```shell
|
||||
$ kubectl run snowflake --image=kubernetes/serve_hostname --replicas=2
|
||||
```
|
||||
|
||||
We have just created a replication controller with a replica count of 2 that runs a pod called snowflake, with a basic container that simply serves the hostname.
|
||||
|
||||
```shell
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
snowflake snowflake kubernetes/serve_hostname run=snowflake 2
|
||||
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
snowflake-8w0qn 1/1 Running 0 22s
|
||||
snowflake-jrpzb 1/1 Running 0 22s
|
||||
```
|
||||
|
||||
This is great: developers are able to do what they want without having to worry about affecting content in the production namespace.
|
||||
|
||||
Let's switch to the production namespace and show how resources in one namespace are hidden from the other.
|
||||
|
||||
```shell
|
||||
$ kubectl config use-context prod
|
||||
```
|
||||
|
||||
The production namespace should be empty.
|
||||
|
||||
```shell
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
```
|
||||
|
||||
Production likes to run cattle, so let's create some cattle pods.
|
||||
|
||||
```shell
|
||||
$ kubectl run cattle --image=kubernetes/serve_hostname --replicas=5
|
||||
|
||||
$ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
cattle cattle kubernetes/serve_hostname run=cattle 5
|
||||
|
||||
$ kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
cattle-97rva 1/1 Running 0 12s
|
||||
cattle-i9ojn 1/1 Running 0 12s
|
||||
cattle-qj3yv 1/1 Running 0 12s
|
||||
cattle-yc7vn 1/1 Running 0 12s
|
||||
cattle-zz7ea 1/1 Running 0 12s
|
||||
```
|
||||
|
||||
At this point, it should be clear that the resources users create in one namespace are hidden from the other namespace.
|
||||
|
||||
As the policy support in Kubernetes evolves, we will extend this scenario to show how you can provide different
|
||||
authorization rules for each namespace.
|
||||
|
||||
|
||||
[]()
|
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"kind": "Namespace",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "development",
|
||||
"labels": {
|
||||
"name": "development"
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,10 +0,0 @@
|
||||
{
|
||||
"kind": "Namespace",
|
||||
"apiVersion": "v1",
|
||||
"metadata": {
|
||||
"name": "production",
|
||||
"labels": {
|
||||
"name": "production"
|
||||
}
|
||||
}
|
||||
}
|
@@ -1,4 +0,0 @@
|
||||
Please refer to this [doc](https://github.com/GoogleCloudPlatform/kubernetes/blob/620af168920b773ade28e27211ad684903a1db21/docs/design/admission_control_limit_range.md#kubectl).
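The example consists of a LimitRange definition plus a valid and an invalid pod spec. A rough walk-through might look like the following; the file names are assumptions, since the original paths are not shown here, and the LimitRange applies to whichever namespace you create it in.

```sh
# Hypothetical walk-through of the limit-range example; file names assumed.
$ kubectl create -f limits.json
$ kubectl create -f valid-pod.json     # 1 CPU / 6Mi memory fits the range and is accepted
$ kubectl create -f invalid-pod.json   # 10m CPU / 5Mi memory is below the 250m / 6Mi minimum and is rejected
```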
|
||||
|
||||
|
||||
[]()
|
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"apiVersion":"v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name": "invalid-pod",
|
||||
"labels": {
|
||||
"name": "invalid-pod"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [{
|
||||
"name": "kubernetes-serve-hostname",
|
||||
"image": "gcr.io/google_containers/serve_hostname",
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": "10m",
|
||||
"memory": "5Mi"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
@@ -1,37 +0,0 @@
|
||||
{
|
||||
"apiVersion": "v1",
|
||||
"kind": "LimitRange",
|
||||
"metadata": {
|
||||
"name": "limits"
|
||||
},
|
||||
"spec": {
|
||||
"limits": [
|
||||
{
|
||||
"type": "Pod",
|
||||
"max": {
|
||||
"memory": "1Gi",
|
||||
"cpu": "2"
|
||||
},
|
||||
"min": {
|
||||
"memory": "6Mi",
|
||||
"cpu": "250m"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "Container",
|
||||
"max": {
|
||||
"memory": "1Gi",
|
||||
"cpu": "2"
|
||||
},
|
||||
"min": {
|
||||
"memory": "6Mi",
|
||||
"cpu": "250m"
|
||||
},
|
||||
"default": {
|
||||
"memory": "6Mi",
|
||||
"cpu": "250m"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"apiVersion":"v1",
|
||||
"kind": "Pod",
|
||||
"metadata": {
|
||||
"name": "valid-pod",
|
||||
"labels": {
|
||||
"name": "valid-pod"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"containers": [{
|
||||
"name": "kubernetes-serve-hostname",
|
||||
"image": "gcr.io/google_containers/serve_hostname",
|
||||
"resources": {
|
||||
"limits": {
|
||||
"cpu": "1",
|
||||
"memory": "6Mi"
|
||||
}
|
||||
}
|
||||
}]
|
||||
}
|
||||
}
|
@@ -1,70 +0,0 @@
|
||||
## Overview
|
||||
This example shows two types of pod health checks: HTTP checks and container execution checks.
|
||||
|
||||
The [exec-liveness.yaml](./exec-liveness.yaml) demonstrates the container execution check.
|
||||
```
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- cat
|
||||
- /tmp/health
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 1
|
||||
```
|
||||
The kubelet executes the command `cat /tmp/health` in the container and reports failure if the command returns a non-zero exit code.
|
||||
|
||||
Note that the container removes the /tmp/health file after 10 seconds,
|
||||
```
|
||||
echo ok > /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600
|
||||
```
|
||||
so when the kubelet executes the health check 15 seconds (defined by `initialDelaySeconds`) after the container started, the check fails.
|
||||
|
||||
|
||||
The [http-liveness.yaml](http-liveness.yaml) demonstrates the HTTP check.
|
||||
```
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 1
|
||||
```
|
||||
The kubelet sends an HTTP request to the specified path and port to perform the health check. If you take a look at image/server.go, you will see that the server starts to respond with error code 500 after 10 seconds, so the check fails.
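You can reproduce that behaviour outside Kubernetes by running the image directly and polling the endpoint. This is only a sketch for a quick manual test: the port mapping and the use of a local Docker daemon are assumptions.

```sh
# Hypothetical manual test: /healthz returns 200 for ~10 seconds, then 500.
$ docker run -d -p 8080:8080 gcr.io/google_containers/liveness /server
$ for i in $(seq 1 10); do
    curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8080/healthz
    sleep 2
  done
```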
|
||||
|
||||
This [guide](../walkthrough/k8s201.md#health-checking) has more information on health checks.
|
||||
|
||||
## Get your hands dirty
|
||||
To show the health check is actually working, first create the pods:
|
||||
```
|
||||
# kubectl create -f exec-liveness.yaml
|
||||
# kubectl create -f http-liveness.yaml
|
||||
```
|
||||
|
||||
Check the status of the pods once they are created:
|
||||
```
|
||||
# kubectl get pods
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
[...]
|
||||
liveness-exec 1/1 Running 0 13s
|
||||
liveness-http 1/1 Running 0 13s
|
||||
```
|
||||
Check the status again half a minute later and you will see that the container restart count has been incremented:
|
||||
```
|
||||
# kubectl get pods
|
||||
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
[...]
|
||||
liveness-exec 1/1 Running 1 36s
|
||||
liveness-http 1/1 Running 1 36s
|
||||
```
|
||||
At the bottom of the *kubectl describe* output there are messages indicating that the liveness probes have failed, and the containers have been killed and recreated.
|
||||
|
||||
```
|
||||
# kubectl describe pods liveness-exec
|
||||
[...]
|
||||
Sat, 27 Jun 2015 13:43:03 +0200 Sat, 27 Jun 2015 13:44:34 +0200 4 {kubelet kubernetes-minion-6fbi} spec.containers{liveness} unhealthy Liveness probe failed: cat: can't open '/tmp/health': No such file or directory
|
||||
Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kubelet kubernetes-minion-6fbi} spec.containers{liveness} killing Killing with docker id 65b52d62c635
|
||||
Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kubelet kubernetes-minion-6fbi} spec.containers{liveness} created Created with docker id ed6bb004ee10
|
||||
Sat, 27 Jun 2015 13:44:44 +0200 Sat, 27 Jun 2015 13:44:44 +0200 1 {kubelet kubernetes-minion-6fbi} spec.containers{liveness} started Started with docker id ed6bb004ee10
|
||||
```
|
||||
[]()
|
@@ -1,21 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
test: liveness
|
||||
name: liveness-exec
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- echo ok > /tmp/health; sleep 10; rm -rf /tmp/health; sleep 600
|
||||
image: gcr.io/google_containers/busybox
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- cat
|
||||
- /tmp/health
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 1
|
||||
name: liveness
|
@@ -1,18 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
test: liveness
|
||||
name: liveness-http
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- /server
|
||||
image: gcr.io/google_containers/liveness
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
initialDelaySeconds: 15
|
||||
timeoutSeconds: 1
|
||||
name: liveness
|
@@ -1,4 +0,0 @@
|
||||
FROM scratch
|
||||
|
||||
ADD server /server
|
||||
|
@@ -1,13 +0,0 @@
|
||||
all: push
|
||||
|
||||
server: server.go
|
||||
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./server.go
|
||||
|
||||
container: server
|
||||
docker build -t gcr.io/google_containers/liveness .
|
||||
|
||||
push: container
|
||||
gcloud docker push gcr.io/google_containers/liveness
|
||||
|
||||
clean:
|
||||
rm -f server
|
@@ -1,46 +0,0 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// A simple server that is alive for 10 seconds, then reports unhealthy for
|
||||
// the rest of its (hopefully) short existence.
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
started := time.Now()
|
||||
http.HandleFunc("/started", func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
data := (time.Now().Sub(started)).String()
|
||||
w.Write([]byte(data))
|
||||
})
|
||||
http.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
|
||||
duration := time.Now().Sub(started)
|
||||
if duration.Seconds() > 10 {
|
||||
w.WriteHeader(500)
|
||||
w.Write([]byte(fmt.Sprintf("error: %v", duration.Seconds())))
|
||||
} else {
|
||||
w.WriteHeader(200)
|
||||
w.Write([]byte("ok"))
|
||||
}
|
||||
})
|
||||
log.Fatal(http.ListenAndServe(":8080", nil))
|
||||
}
|
@@ -1,26 +0,0 @@
|
||||
# Makefile for launching synthetic logging sources (any platform)
|
||||
# and for reporting the forwarding rules for the
|
||||
# Elasticsearch and Kibana pods for the GCE platform.
|
||||
# For examples of how to observe the ingested logs please
|
||||
# see the appropriate getting started guide e.g.
|
||||
# Google Cloud Logging: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/getting-started-guides/logging.md
|
||||
# With Elasticsearch and Kibana logging: https://github.com/GoogleCloudPlatform/kubernetes/blob/master/docs/getting-started-guides/logging-elasticsearch.md
|
||||
|
||||
.PHONY: up down logger-up logger-down logger10-up logger10-down
|
||||
|
||||
up: logger-up logger10-up
|
||||
|
||||
down: logger-down logger10-down
|
||||
|
||||
logger-up:
|
||||
kubectl create -f synthetic_0_25lps.yaml
|
||||
|
||||
logger-down:
|
||||
kubectl delete pod synthetic-logger-0.25lps-pod
|
||||
|
||||
logger10-up:
|
||||
kubectl create -f synthetic_10lps.yaml
|
||||
|
||||
logger10-down:
|
||||
kubectl delete pod synthetic-logger-10lps-pod
|
||||
|
@@ -1,16 +0,0 @@
|
||||
# Elasticsearch/Kibana Logging Demonstration
|
||||
This directory contains two [pod](../../docs/pods.md) specifications which can be used as synthetic
|
||||
logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml)
|
||||
describes a pod that just emits a log message once every 4 seconds. The pod specification in
|
||||
[synthetic_10lps.yaml](synthetic_10lps.yaml)
|
||||
describes a pod that just emits 10 log lines per second.
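If you just want to see a source in action before wiring up any logging backend, you can create one of the pods and read its output directly; the pod name below is the one defined in synthetic_0_25lps.yaml.

```sh
# Create the 0.25 lines-per-second logger and read its output.
$ kubectl create -f synthetic_0_25lps.yaml
$ kubectl logs synthetic-logger-0.25lps-pod
```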
|
||||
|
||||
To observe the ingested log lines when using Google Cloud Logging please see the getting
|
||||
started instructions
|
||||
at [Cluster Level Logging to Google Cloud Logging](/docs/getting-started-guides/logging.md).
|
||||
To observe the ingested log lines when using Elasticsearch and Kibana please see the getting
|
||||
started instructions
|
||||
at [Cluster Level Logging with Elasticsearch and Kibana](/docs/getting-started-guides/logging-elasticsearch.md).
|
||||
|
||||
|
||||
[]()
|
Binary file not shown.
@@ -1,30 +0,0 @@
|
||||
# This pod specification creates an instance of a synthetic logger. The logger
|
||||
# is simply a program that writes out the hostname of the pod, a count which increments
|
||||
# by one on each iteration (to help notice missing log entries) and the date using
|
||||
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
|
||||
# of 0.25 lines per second. The shell script is given directly to bash as the -c argument
|
||||
# and could have been written out as:
|
||||
# i="0"
|
||||
# while true
|
||||
# do
|
||||
# echo -n "`hostname`: $i: "
|
||||
# date --rfc-3339 ns
|
||||
# sleep 4
|
||||
# i=$[$i+1]
|
||||
# done
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: synth-logging-source
|
||||
name: synthetic-logger-0.25lps-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: synth-lgr
|
||||
image: ubuntu:14.04
|
||||
args:
|
||||
- bash
|
||||
- -c
|
||||
- 'i="0"; while true; do echo -n "`hostname`: $i: "; date --rfc-3339 ns; sleep
|
||||
4; i=$[$i+1]; done'
|
||||
|
@@ -1,30 +0,0 @@
|
||||
# This pod specification creates an instance of a synthetic logger. The logger
|
||||
# is simply a program that writes out the hostname of the pod, a count which increments
|
||||
# by one on each iteration (to help notice missing log entries) and the date using
|
||||
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
|
||||
# of 10 lines per second. The shell script is given directly to bash as the -c argument
|
||||
# and could have been written out as:
|
||||
# i="0"
|
||||
# while true
|
||||
# do
|
||||
# echo -n "`hostname`: $i: "
|
||||
# date --rfc-3339 ns
|
||||
# sleep 0.1
|
||||
# i=$[$i+1]
|
||||
# done
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: synth-logging-source
|
||||
name: synthetic-logger-10lps-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: synth-lgr
|
||||
image: ubuntu:14.04
|
||||
args:
|
||||
- bash
|
||||
- -c
|
||||
- 'i="0"; while true; do echo -n "`hostname`: $i: "; date --rfc-3339 ns; sleep
|
||||
0.1; i=$[$i+1]; done'
|
||||
|
@@ -1,3 +1,35 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/meteor/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
Meteor on Kubernetes
|
||||
=====================
|
||||
|
||||
@@ -11,7 +43,7 @@ Meteor uses MongoDB, and we will use the `GCEPersistentDisk` type of
|
||||
volume for persistent storage. Therefore, this example is only
|
||||
applicable to [Google Compute
|
||||
Engine](https://cloud.google.com/compute/). Take a look at the
|
||||
[volumes documentation](/docs/volumes.md) for other options.
|
||||
[volumes documentation](../../docs/user-guide/volumes.md) for other options.
|
||||
|
||||
First, if you have not already done so:
|
||||
|
||||
@@ -24,13 +56,14 @@ billing](https://developers.google.com/console/help/new/#billing).
|
||||
Authenticate with gcloud and set the gcloud default project name to
|
||||
point to the project you want to use for your Kubernetes cluster:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
gcloud auth login
|
||||
gcloud config set project <project-name>
|
||||
```
|
||||
|
||||
Next, start up a Kubernetes cluster:
|
||||
```shell
|
||||
|
||||
```sh
|
||||
wget -q -O - https://get.k8s.io | bash
|
||||
```
|
||||
|
||||
@@ -49,6 +82,7 @@ files to your existing Meteor project `Dockerfile` and
|
||||
|
||||
`Dockerfile` should contain the below lines. You should replace the
|
||||
`ROOT_URL` with the actual hostname of your app.
|
||||
|
||||
```
|
||||
FROM chees/meteor-kubernetes
|
||||
ENV ROOT_URL http://myawesomeapp.com
|
||||
@@ -57,6 +91,7 @@ ENV ROOT_URL http://myawesomeapp.com
|
||||
The `.dockerignore` file should contain the below lines. This tells
|
||||
Docker to ignore the files on those directories when it's building
|
||||
your container.
|
||||
|
||||
```
|
||||
.meteor/local
|
||||
packages/*/.build*
|
||||
@@ -71,6 +106,7 @@ free to use this app for this example.
|
||||
|
||||
Now you can build your container by running this in
|
||||
your Meteor project directory:
|
||||
|
||||
```
|
||||
docker build -t my-meteor .
|
||||
```
|
||||
@@ -81,6 +117,7 @@ Pushing to a registry
|
||||
For the [Docker Hub](https://hub.docker.com/), tag your app image with
|
||||
your username and push to the Hub with the below commands. Replace
|
||||
`<username>` with your Hub username.
|
||||
|
||||
```
|
||||
docker tag my-meteor <username>/my-meteor
|
||||
docker push <username>/my-meteor
|
||||
@@ -90,6 +127,7 @@ For [Google Container
|
||||
Registry](https://cloud.google.com/tools/container-registry/), tag
|
||||
your app image with your project ID, and push to GCR. Replace
|
||||
`<project>` with your project ID.
|
||||
|
||||
```
|
||||
docker tag my-meteor gcr.io/<project>/my-meteor
|
||||
gcloud docker push gcr.io/<project>/my-meteor
|
||||
@@ -104,35 +142,42 @@ and make sure the `image:` points to the container you just pushed to
|
||||
the Docker Hub or GCR.
|
||||
|
||||
We will need to provide MongoDB with a persistent Kubernetes volume to
|
||||
store its data. See the [volumes documentation](/docs/volumes.md) for
|
||||
store its data. See the [volumes documentation](../../docs/user-guide/volumes.md) for
|
||||
options. We're going to use Google Compute Engine persistent
|
||||
disks. Create the MongoDB disk by running:
|
||||
|
||||
```
|
||||
gcloud compute disks create --size=200GB mongo-disk
|
||||
```
|
||||
|
||||
Now you can start Mongo using that disk:
|
||||
|
||||
```
|
||||
kubectl create -f mongo-pod.json
|
||||
kubectl create -f mongo-service.json
|
||||
kubectl create -f examples/meteor/mongo-pod.json
|
||||
kubectl create -f examples/meteor/mongo-service.json
|
||||
```
|
||||
|
||||
Wait until Mongo is started completely and then start up your Meteor app:
|
||||
|
||||
```
|
||||
kubectl create -f meteor-controller.json
|
||||
kubectl create -f meteor-service.json
|
||||
kubectl create -f examples/meteor/meteor-service.json
|
||||
kubectl create -f examples/meteor/meteor-controller.json
|
||||
```
|
||||
|
||||
Note that [`meteor-service.json`](meteor-service.json) creates a load balancer, so
|
||||
your app should be available through the IP of that load balancer once
|
||||
the Meteor pods are started. You can find the IP of your load balancer
|
||||
the Meteor pods are started. We also created the service before creating the rc to
|
||||
aid the scheduler in placing pods, as the scheduler ranks pod placement according to
|
||||
service anti-affinity (among other things). You can find the IP of your load balancer
|
||||
by running:
|
||||
|
||||
```
|
||||
kubectl get services/meteor --template="{{range .status.loadBalancer.ingress}} {{.ip}} {{end}}"
|
||||
kubectl get service meteor --template="{{range .status.loadBalancer.ingress}} {{.ip}} {{end}}"
|
||||
```
|
||||
|
||||
You will have to open up port 80 if it's not open yet in your
|
||||
environment. On Google Compute Engine, you may run the below command.
|
||||
|
||||
```
|
||||
gcloud compute firewall-rules create meteor-80 --allow=tcp:80 --target-tags kubernetes-minion
|
||||
```
|
||||
@@ -147,7 +192,8 @@ to get an insight of what happens during the `docker build` step. The
|
||||
image is based on the Node.js official image. It then installs Meteor
|
||||
and copies in your app's code. The last line specifies what happens
|
||||
when your app container is run.
|
||||
```
|
||||
|
||||
```sh
|
||||
ENTRYPOINT MONGO_URL=mongodb://$MONGO_SERVICE_HOST:$MONGO_SERVICE_PORT /usr/local/bin/node main.js
|
||||
```
|
||||
|
||||
@@ -155,7 +201,7 @@ Here we can see the MongoDB host and port information being passed
|
||||
into the Meteor app. The `MONGO_SERVICE...` environment variables are
|
||||
set by Kubernetes, and point to the service named `mongo` specified in
|
||||
[`mongo-service.json`](mongo-service.json). See the [environment
|
||||
documentation](/docs/container-environment.md) for more details.
|
||||
documentation](../../docs/user-guide/container-environment.md) for more details.
|
||||
|
||||
As you may know, Meteor uses long lasting connections, and requires
|
||||
_sticky sessions_. With Kubernetes you can scale out your app easily
|
||||
@@ -163,13 +209,15 @@ with session affinity. The
|
||||
[`meteor-service.json`](meteor-service.json) file contains
|
||||
`"sessionAffinity": "ClientIP"`, which provides this for us. See the
|
||||
[service
|
||||
documentation](/docs/services.md#virtual-ips-and-service-proxies) for
|
||||
documentation](../../docs/user-guide/services.md#virtual-ips-and-service-proxies) for
|
||||
more information.
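A quick way to confirm the setting on a running cluster is to read it back from the service object. This is just a sketch, using the same template mechanism as the load balancer IP lookup above.

```sh
# Read the session affinity back from the live service; expected output: ClientIP
$ kubectl get services/meteor --template="{{.spec.sessionAffinity}}"
```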
|
||||
|
||||
As mentioned above, the mongo container uses a volume which is mapped
|
||||
to a persistent disk by Kubernetes. In [`mongo-pod.json`](mongo-pod.json) the container
|
||||
section specifies the volume:
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "mongo-disk",
|
||||
@@ -179,7 +227,9 @@ section specifies the volume:
|
||||
|
||||
The name `mongo-disk` refers to the volume specified outside the
|
||||
container section:
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"volumes": [
|
||||
{
|
||||
"name": "mongo-disk",
|
||||
@@ -192,4 +242,6 @@ container section:
|
||||
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,35 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/meteor/dockerbase/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
Building the meteor-kubernetes base image
|
||||
-----------------------------------------
|
||||
|
||||
@@ -9,4 +41,6 @@ To build and push the base meteor-kubernetes image:
|
||||
docker push chees/meteor-kubernetes
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -9,9 +9,6 @@
|
||||
},
|
||||
"spec": {
|
||||
"replicas": 2,
|
||||
"selector": {
|
||||
"name": "meteor"
|
||||
},
|
||||
"template": {
|
||||
"metadata": {
|
||||
"labels": {
|
||||
@@ -26,12 +23,9 @@
|
||||
"ports": [
|
||||
{
|
||||
"name": "http-server",
|
||||
"hostPort": 80,
|
||||
"containerPort": 8080,
|
||||
"protocol": "TCP"
|
||||
"containerPort": 8080
|
||||
}
|
||||
],
|
||||
"resources": {}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@@ -25,11 +25,9 @@
|
||||
"ports": [
|
||||
{
|
||||
"name": "mongo",
|
||||
"containerPort": 27017,
|
||||
"protocol": "TCP"
|
||||
"containerPort": 27017
|
||||
}
|
||||
],
|
||||
"resources": {},
|
||||
"volumeMounts": [
|
||||
{
|
||||
"name": "mongo-disk",
|
||||
|
@@ -10,7 +10,6 @@
|
||||
"spec": {
|
||||
"ports": [
|
||||
{
|
||||
"protocol": "TCP",
|
||||
"port": 27017,
|
||||
"targetPort": "mongo"
|
||||
}
|
||||
|
@@ -1,49 +0,0 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: redis
|
||||
redis-sentinel: "true"
|
||||
role: master
|
||||
name: redis-master
|
||||
spec:
|
||||
containers:
|
||||
- name: master
|
||||
image: kubernetes/redis:v1
|
||||
env:
|
||||
- name: MASTER
|
||||
value: "true"
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
resources:
|
||||
limits:
|
||||
cpu: "0.5"
|
||||
volumeMounts:
|
||||
- mountPath: /redis-master-data
|
||||
name: data
|
||||
- name: sentinel
|
||||
image: kubernetes/redis:v1
|
||||
env:
|
||||
- name: SENTINEL
|
||||
value: "true"
|
||||
ports:
|
||||
- containerPort: 26379
|
||||
volumes:
|
||||
- name: data
|
||||
emptyDir: {}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
labels:
|
||||
name: redis-proxy
|
||||
role: proxy
|
||||
name: redis-proxy
|
||||
spec:
|
||||
containers:
|
||||
- name: proxy
|
||||
image: kubernetes/redis-proxy:v1
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
name: api
|
@@ -1,16 +1,49 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/mysql-wordpress-pd/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Persistent Installation of MySQL and WordPress on Kubernetes
|
||||
|
||||
This example describes how to run a persistent installation of [Wordpress](https://wordpress.org/) using the [volumes](/docs/volumes.md) feature of Kubernetes, and [Google Compute Engine](https://cloud.google.com/compute/docs/disks) [persistent disks](/docs/volumes.md#gcepersistentdisk).
|
||||
This example describes how to run a persistent installation of [Wordpress](https://wordpress.org/) using the [volumes](../../docs/user-guide/volumes.md) feature of Kubernetes, and [Google Compute Engine](https://cloud.google.com/compute/docs/disks) [persistent disks](../../docs/user-guide/volumes.md#gcepersistentdisk).
|
||||
|
||||
We'll use the [mysql](https://registry.hub.docker.com/_/mysql/) and [wordpress](https://registry.hub.docker.com/_/wordpress/) official [Docker](https://www.docker.com/) images for this installation. (The wordpress image includes an Apache server).
|
||||
|
||||
We'll create two Kubernetes [pods](../../docs/pods.md) to run mysql and wordpress, both with associated persistent disks, then set up a Kubernetes [service](../../docs/services.md) to front each pod.
|
||||
We'll create two Kubernetes [pods](../../docs/user-guide/pods.md) to run mysql and wordpress, both with associated persistent disks, then set up a Kubernetes [service](../../docs/user-guide/services.md) to front each pod.
|
||||
|
||||
This example demonstrates several useful things, including: how to set up and use persistent disks with Kubernetes pods; how to define Kubernetes services to leverage docker-links-compatible service environment variables; and use of an external load balancer to expose the wordpress service externally and make it transparent to the user if the wordpress pod moves to a different cluster node.
|
||||
|
||||
## Get started on Google Compute Engine (GCE)
|
||||
|
||||
Because we're using the `GCEPersistentDisk` type of volume for persistent storage, this example is only applicable to [Google Compute Engine](https://cloud.google.com/compute/). Take a look at the [volumes documentation](/docs/volumes.md) for other options.
|
||||
Because we're using the `GCEPersistentDisk` type of volume for persistent storage, this example is only applicable to [Google Compute Engine](https://cloud.google.com/compute/). Take a look at the [volumes documentation](../../docs/user-guide/volumes.md) for other options.
|
||||
|
||||
First, if you have not already done so:
|
||||
|
||||
@@ -20,13 +53,14 @@ First, if you have not already done so:
|
||||
|
||||
Authenticate with gcloud and set the gcloud default project name to point to the project you want to use for your Kubernetes cluster:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
gcloud auth login
|
||||
gcloud config set project <project-name>
|
||||
```
|
||||
|
||||
Next, start up a Kubernetes cluster:
|
||||
```shell
|
||||
|
||||
```sh
|
||||
wget -q -O - https://get.k8s.io | bash
|
||||
```
|
||||
|
||||
@@ -34,21 +68,21 @@ Please see the [GCE getting started guide](../../docs/getting-started-guides/gce
|
||||
|
||||
## Create two persistent disks
|
||||
|
||||
For this WordPress installation, we're going to configure our Kubernetes [pods](../../docs/pods.md) to use [persistent disks](https://cloud.google.com/compute/docs/disks). This means that we can preserve installation state across pod shutdown and re-startup.
|
||||
For this WordPress installation, we're going to configure our Kubernetes [pods](../../docs/user-guide/pods.md) to use [persistent disks](https://cloud.google.com/compute/docs/disks). This means that we can preserve installation state across pod shutdown and re-startup.
|
||||
|
||||
You will need to create the disks in the same [GCE zone](https://cloud.google.com/compute/docs/zones) as the Kubernetes cluster. The default setup script will create the cluster in the `us-central1-b` zone, as seen in the [config-default.sh](/cluster/gce/config-default.sh) file. Replace `$ZONE` below with the appropriate zone.
|
||||
You will need to create the disks in the same [GCE zone](https://cloud.google.com/compute/docs/zones) as the Kubernetes cluster. The default setup script will create the cluster in the `us-central1-b` zone, as seen in the [config-default.sh](../../cluster/gce/config-default.sh) file. Replace `$ZONE` below with the appropriate zone.
|
||||
|
||||
We will create two disks: one for the mysql pod, and one for the wordpress pod. In this example, we create 20GB disks, which will be sufficient for this demo. Feel free to change the size to align with your needs, as wordpress requirements can vary. Also, keep in mind that [disk performance scales with size](https://cloud.google.com/compute/docs/disks/#comparison_of_disk_types).
|
||||
|
||||
First create the mysql disk.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
gcloud compute disks create --size=20GB --zone=$ZONE mysql-disk
|
||||
```
|
||||
|
||||
Then create the wordpress disk.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
gcloud compute disks create --size=20GB --zone=$ZONE wordpress-disk
|
||||
```
|
||||
|
||||
@@ -99,14 +133,14 @@ spec:
|
||||
Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created.
|
||||
Once you've edited the file to set your database password, create the pod as follows, running the command from the root of your Kubernetes source tree:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f mysql.yaml
|
||||
```sh
|
||||
$ kubectl create -f examples/mysql-wordpress-pd/mysql.yaml
|
||||
```
|
||||
|
||||
It may take a short period before the new pod reaches the `Running` state.
|
||||
List all pods to see the status of this new pod and the cluster node that it is running on:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get pods
|
||||
```
|
||||
|
||||
@@ -115,7 +149,7 @@ $ kubectl get pods
|
||||
|
||||
You can take a look at the logs for a pod by using `kubectl logs`. For example:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl logs mysql
|
||||
```
|
||||
|
||||
@@ -123,8 +157,8 @@ If you want to do deeper troubleshooting, e.g. if it seems a container is not st
|
||||
|
||||
### Start the Mysql service
|
||||
|
||||
We'll define and start a [service](../../docs/services.md) that lets other pods access the mysql database on a known port and host.
|
||||
We will specifically name the service `mysql`. This will let us leverage the support for [Docker-links-compatible](../../docs/services.md#how-do-they-work) service environment variables when we set up the wordpress pod. The wordpress Docker image expects to be linked to a mysql container named `mysql`, as you can see in the "How to use this image" section on the wordpress docker hub [page](https://registry.hub.docker.com/_/wordpress/).
|
||||
We'll define and start a [service](../../docs/user-guide/services.md) that lets other pods access the mysql database on a known port and host.
|
||||
We will specifically name the service `mysql`. This will let us leverage the support for [Docker-links-compatible](../../docs/user-guide/services.md#how-do-they-work) service environment variables when we set up the wordpress pod. The wordpress Docker image expects to be linked to a mysql container named `mysql`, as you can see in the "How to use this image" section on the wordpress docker hub [page](https://registry.hub.docker.com/_/wordpress/).
|
||||
|
||||
So if we label our Kubernetes mysql service `mysql`, the wordpress pod will be able to use the Docker-links-compatible environment variables, defined by Kubernetes, to connect to the database.
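If you want to see the variables a pod actually receives, you can dump its environment once the wordpress pod is up. This is only an illustrative sketch: the pod name `wordpress` is an assumption that should match this example's pod definition.

```sh
# Illustrative check of the service environment variables injected for the "mysql" service.
$ kubectl exec wordpress -- env | grep MYSQL_SERVICE
```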
|
||||
|
||||
@@ -148,13 +182,13 @@ spec:
|
||||
|
||||
Start the service like this:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f mysql-service.yaml
|
||||
```sh
|
||||
$ kubectl create -f examples/mysql-wordpress-pd/mysql-service.yaml
|
||||
```
|
||||
|
||||
You can see what services are running via:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get services
|
||||
```
|
||||
|
||||
@@ -198,14 +232,14 @@ spec:
|
||||
|
||||
Create the pod:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f wordpress.yaml
|
||||
```sh
|
||||
$ kubectl create -f examples/mysql-wordpress-pd/wordpress.yaml
|
||||
```
|
||||
|
||||
And list the pods to check that the status of the new pod changes
|
||||
to `Running`. As above, this might take a minute.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get pods
|
||||
```
|
||||
|
||||
@@ -237,23 +271,25 @@ Note also that we've set the service port to 80. We'll return to that shortly.
|
||||
|
||||
Start the service:
|
||||
|
||||
```shell
|
||||
$ kubectl create -f wordpress-service.yaml
|
||||
```sh
|
||||
$ kubectl create -f examples/mysql-wordpress-pd/wordpress-service.yaml
|
||||
```
|
||||
|
||||
and see it in the list of services:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ kubectl get services
|
||||
```
|
||||
|
||||
Then, find the external IP for your WordPress service by running:
|
||||
|
||||
```
|
||||
$ kubectl get services/wpfrontend --template="{{range .status.loadBalancer.ingress}} {{.ip}} {{end}}"
|
||||
```
|
||||
|
||||
or by listing the forwarding rules for your project:
|
||||
```shell
|
||||
|
||||
```sh
|
||||
$ gcloud compute forwarding-rules list
|
||||
```
|
||||
|
||||
@@ -263,7 +299,7 @@ Look for the rule called `wpfrontend`, which is what we named the wordpress serv
|
||||
|
||||
To access your new installation, you may first need to open up port 80 (the port specified in the wordpress service config) in the firewall for your cluster. You can do this, e.g., via:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ gcloud compute firewall-rules create sample-http --allow tcp:80
|
||||
```
|
||||
|
||||
@@ -284,9 +320,9 @@ Set up your WordPress blog and play around with it a bit. Then, take down its p
|
||||
|
||||
If you are just experimenting, you can take down and bring up only the pods:
|
||||
|
||||
```shell
|
||||
$ kubectl delete -f wordpress.yaml
|
||||
$ kubectl delete -f mysql.yaml
|
||||
```sh
|
||||
$ kubectl delete -f examples/mysql-wordpress-pd/wordpress.yaml
|
||||
$ kubectl delete -f examples/mysql-wordpress-pd/mysql.yaml
|
||||
```
|
||||
|
||||
When you restart the pods again (using the `create` operation as described above), their services will pick up the new pods based on their labels.
|
||||
@@ -295,9 +331,11 @@ If you want to shut down the entire app installation, you can delete the service
|
||||
|
||||
If you are ready to turn down your Kubernetes cluster altogether, run:
|
||||
|
||||
```shell
|
||||
```sh
|
||||
$ cluster/kube-down.sh
|
||||
```
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/nfs/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# Example of NFS volume
|
||||
|
||||
See [nfs-web-pod.yaml](nfs-web-pod.yaml) for a quick example of how to use an NFS volume
|
||||
@@ -8,10 +41,11 @@ in a pod.
|
||||
The example below shows how to export an NFS share from a pod and import it
|
||||
into another one.
|
||||
|
||||
###Prerequisites
|
||||
### Prerequisites
|
||||
|
||||
The NFS server pod creates a privileged container, so if you are using a Salt-based KUBERNETES_PROVIDER (**gce**, **vagrant**, **aws**), you have to enable the ability to create privileged containers via the API.
|
||||
|
||||
```shell
|
||||
```sh
|
||||
#At the root of Kubernetes source code
|
||||
$ vi cluster/saltbase/pillar/privilege.sls
|
||||
|
||||
@@ -50,4 +84,6 @@ Now the pod serves `index.html` from the NFS server:
|
||||
Hello World!
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/nfs/exporter/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# NFS-exporter container
|
||||
|
||||
Inspired by https://github.com/cpuguy83/docker-nfs-server. Rewritten for
|
||||
@@ -10,4 +43,6 @@ Usage::
|
||||
docker run -d --name nfs --privileged jsafrane/nfsexporter /path/to/share /path/to/share2 ...
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,3 +1,36 @@
|
||||
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
<!-- BEGIN STRIP_FOR_RELEASE -->
|
||||
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
|
||||
width="25" height="25">
|
||||
|
||||
<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>
|
||||
|
||||
If you are using a released version of Kubernetes, you should
|
||||
refer to the docs that go with that version.
|
||||
|
||||
<strong>
|
||||
The latest 1.0.x release of this document can be found
|
||||
[here](http://releases.k8s.io/release-1.0/examples/nfs/nfs-data/README.md).
|
||||
|
||||
Documentation for other releases can be found at
|
||||
[releases.k8s.io](http://releases.k8s.io).
|
||||
</strong>
|
||||
--
|
||||
|
||||
<!-- END STRIP_FOR_RELEASE -->
|
||||
|
||||
<!-- END MUNGE: UNVERSIONED_WARNING -->
|
||||
|
||||
# NFS-exporter container with a file
|
||||
|
||||
This container exports /mnt/data with index.html in it via NFSv4. Based on
|
||||
@@ -6,4 +39,7 @@ This container exports /mnt/data with index.html in it via NFSv4. Based on
|
||||
Available in dockerhub as
|
||||
[jsafrane/nfs-data](https://registry.hub.docker.com/u/jsafrane/nfs-data/).
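A minimal local smoke test, mirroring the exporter's usage above; the need for `--privileged` is an assumption carried over from the base image.

```sh
# Hypothetical local run of the prebuilt image.
$ docker run -d --name nfs-data --privileged jsafrane/nfs-data
```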
|
||||
|
||||
|
||||
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
|
||||
[]()
|
||||
<!-- END MUNGE: GENERATED_ANALYTICS -->
|
||||
|
@@ -1,63 +0,0 @@
|
||||
## Node selection example
|
||||
|
||||
This example shows how to assign a pod to a specific node or to one of a set of nodes using node labels and the nodeSelector field in a pod specification. Generally this is unnecessary, as the scheduler will take care of things for you, but you may want to do so in certain circumstances, for example to ensure that your pod ends up on a machine with an SSD attached to it.
|
||||
|
||||
### Step Zero: Prerequisites
|
||||
|
||||
This example assumes that you have a basic understanding of kubernetes pods and that you have [turned up a Kubernetes cluster](https://github.com/GoogleCloudPlatform/kubernetes#documentation).
|
||||
|
||||
### Step One: Attach label to the node
|
||||
|
||||
Run `kubectl get nodes` to get the names of your cluster's nodes. Pick out the one that you want to add a label to.
|
||||
|
||||
Then, to add a label to the node you've chosen, run `kubectl label nodes <node-name> <label-key>=<label-value>`. For example, if my node name is 'kubernetes-foo-node-1.c.a-robinson.internal' and my desired label is 'disktype=ssd', then I can run `kubectl label nodes kubernetes-foo-node-1.c.a-robinson.internal disktype=ssd`.
|
||||
|
||||
If this fails with an "invalid command" error, you're likely using an older version of kubectl that doesn't have the `label` command. In that case, see the [previous version](https://github.com/GoogleCloudPlatform/kubernetes/blob/a053dbc313572ed60d89dae9821ecab8bfd676dc/examples/node-selection/README.md) of this guide for instructions on how to manually set labels on a node.
|
||||
|
||||
Also, note that label keys must be in the form of DNS labels (as described in the [identifiers doc](/docs/design/identifiers.md)), meaning that they are not allowed to contain any upper-case letters.
|
||||
|
||||
You can verify that it worked by re-running `kubectl get nodes` and checking that the node now has a label.
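Another quick check is to describe the node and look at its Labels section; the node name below is reused from the hypothetical example above, so substitute your own.

```sh
# Check the Labels section of the node description.
$ kubectl describe node kubernetes-foo-node-1.c.a-robinson.internal | grep -A 2 Labels
```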
|
||||
|
||||
### Step Two: Add a nodeSelector field to your pod configuration

Take whatever pod config file you want to run and add a nodeSelector section to it. For example, if this is my pod config:

<pre>
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
</pre>

Then add a nodeSelector like so:

<pre>
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  <b>nodeSelector:
    disktype: ssd</b>
</pre>

When you then run `kubectl create -f pod.yaml`, the pod will get scheduled on the node that you attached the label to! You can verify that it worked by running `kubectl get pods -o wide` and looking at the "NODE" that the pod was assigned to.

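As a minimal sketch, assuming the modified config above is saved as `pod.yaml`:

```sh
$ kubectl create -f pod.yaml
$ kubectl get pods -o wide
```
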
### Conclusion

While this example only covered one node, you can attach labels to as many nodes as you want. Then when you schedule a pod with a nodeSelector, it can be scheduled on any of the nodes that satisfy that nodeSelector. Be careful, though, that the nodeSelector matches at least one node's labels; if it doesn't, the pod won't be scheduled at all.


[]()
@@ -1,13 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    env: test
spec:
  containers:
  - name: nginx
    image: nginx
    imagePullPolicy: IfNotPresent
  nodeSelector:
    disktype: ssd
@@ -1,3 +1,36 @@
<!-- BEGIN MUNGE: UNVERSIONED_WARNING -->

<!-- BEGIN STRIP_FOR_RELEASE -->

<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">
<img src="http://kubernetes.io/img/warning.png" alt="WARNING"
     width="25" height="25">

<h2>PLEASE NOTE: This document applies to the HEAD of the source tree</h2>

If you are using a released version of Kubernetes, you should
refer to the docs that go with that version.

<strong>
The latest 1.0.x release of this document can be found
[here](http://releases.k8s.io/release-1.0/examples/openshift-origin/README.md).

Documentation for other releases can be found at
[releases.k8s.io](http://releases.k8s.io).
</strong>
--

<!-- END STRIP_FOR_RELEASE -->

<!-- END MUNGE: UNVERSIONED_WARNING -->

## OpenShift Origin example

This example shows how to run OpenShift Origin as a pod on an existing Kubernetes cluster.
@@ -18,7 +51,7 @@ OpenShift Origin creates privileged containers when running Docker builds during
If you are using a Salt based KUBERNETES_PROVIDER (**gce**, **vagrant**, **aws**), you should enable the
ability to create privileged containers via the API.

```shell
```sh
$ cd kubernetes
$ vi cluster/saltbase/pillar/privilege.sls

@@ -28,14 +61,14 @@ allow_privileged: true

Now spin up a cluster using your preferred KUBERNETES_PROVIDER

```shell
```sh
$ export KUBERNETES_PROVIDER=gce
$ cluster/kube-up.sh
```

Next, let's set up some variables and create a local folder that will hold generated configuration files.

```shell
```sh
$ export OPENSHIFT_EXAMPLE=$(pwd)/examples/openshift-origin
$ export OPENSHIFT_CONFIG=${OPENSHIFT_EXAMPLE}/config
$ mkdir ${OPENSHIFT_CONFIG}
@@ -61,7 +94,7 @@ An external load balancer is needed to route traffic to our OpenShift master ser
Kubernetes cluster.


```shell
```sh
$ cluster/kubectl.sh create -f $OPENSHIFT_EXAMPLE/openshift-service.yaml
```

@@ -74,7 +107,7 @@ build default certificates.

Grab the public IP address of the service we previously created.

```shell
```sh
$ export PUBLIC_IP=$(cluster/kubectl.sh get services openshift --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}")
$ echo $PUBLIC_IP
```

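If the load balancer has not finished provisioning, the template above can come back empty. A hedged sketch of a retry loop, reusing the same command, simply re-runs it until an IP is printed:

```sh
# Retry every 10 seconds until the service reports an external IP.
$ while [ -z "${PUBLIC_IP}" ]; do sleep 10; export PUBLIC_IP=$(cluster/kubectl.sh get services openshift --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}"); done; echo ${PUBLIC_IP}
```
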
@@ -83,7 +116,7 @@ Ensure you have a valid PUBLIC_IP address before continuing in the example.

We now need to run a command on your host to generate a proper OpenShift configuration. To do this, we will volume mount the configuration directory that holds your Kubernetes kubeconfig file from the prior step.

```shell
```sh
docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig='/config/kubeconfig' --master='https://localhost:8443' --public-master="https://${PUBLIC_IP}:8443"
```

@@ -103,13 +136,13 @@ $ sudo -E chown -R ${USER} ${OPENSHIFT_CONFIG}

Then run the following command to collapse them into a Kubernetes secret.

```shell
```sh
docker run -i -t --privileged -e="OPENSHIFTCONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin ex bundle-secret openshift-config -f /config &> ${OPENSHIFT_EXAMPLE}/secret.json
```

Now, let's create the secret in your Kubernetes cluster.

```shell
```sh
$ cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/secret.json
```

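As an optional check, not part of the original walkthrough, you can confirm the `openshift-config` secret is now registered with the cluster:

```sh
$ cluster/kubectl.sh get secrets | grep openshift-config
```
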
@@ -122,13 +155,13 @@ We are now ready to deploy OpenShift.

We will deploy a pod that runs the OpenShift master. The OpenShift master will delegate to the underlying Kubernetes
system to manage Kubernetes specific resources. For the sake of simplicity, the OpenShift master will run with an embedded etcd to hold OpenShift specific content. This demonstration will evolve in the future to show how to run etcd in a pod so that content is not destroyed if the OpenShift master fails.

```shell
```sh
$ cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml
```

You should now get a pod provisioned whose name begins with openshift.

```shell
```sh
$ cluster/kubectl.sh get pods | grep openshift
$ cluster/kubectl.sh log openshift-t7147 origin
Running: cluster/../cluster/gce/../../cluster/../_output/dockerized/bin/linux/amd64/kubectl logs openshift-t7147 origin
@@ -138,7 +171,7 @@ Running: cluster/../cluster/gce/../../cluster/../_output/dockerized/bin/linux/am

Depending upon your cloud provider, you may need to open up an external firewall rule for tcp:8443. For GCE, you can run the following:

```shell
```sh
gcloud compute --project "your-project" firewall-rules create "origin" --allow tcp:8443 --network "your-network" --source-ranges "0.0.0.0/0"
```

@@ -148,11 +181,13 @@ Open a browser and visit the OpenShift master public address reported in your lo

You can use the CLI commands by running the following:

```shell
```sh
$ docker run --privileged --entrypoint="/usr/bin/bash" -it -e="OPENSHIFTCONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin
$ osc config use-context public-default
$ osc --help
```

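From inside that container, the usual client verbs should then work against the public master; for example, a hedged sketch assuming the standard `osc` verbs:

```sh
# Run from inside the openshift/origin container started above.
$ osc get pods
$ osc get services
```
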
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

@@ -1,114 +0,0 @@
# How To Use Persistent Volumes

The purpose of this guide is to help you become familiar with Kubernetes Persistent Volumes. By the end of the guide, we'll have
nginx serving content from your persistent volume.

This guide assumes knowledge of Kubernetes fundamentals and that you have a cluster up and running.

## Provisioning

A Persistent Volume (PV) in Kubernetes represents a real piece of underlying storage capacity in the infrastructure. Cluster administrators
must first create storage (create their Google Compute Engine (GCE) disks, export their NFS shares, etc.) in order for Kubernetes to mount it.

PVs are intended for "network volumes" like GCE Persistent Disks, NFS shares, and AWS ElasticBlockStore volumes. ```HostPath``` was included
for ease of development and testing. You'll create a local ```HostPath``` for this example.

> IMPORTANT! For ```HostPath``` to work, you will need to run a single node cluster. Kubernetes does not
support local storage on the host at this time. There is no guarantee your pod ends up on the correct node where the ```HostPath``` resides.

```
# this will be nginx's webroot
mkdir /tmp/data01
echo 'I love Kubernetes storage!' > /tmp/data01/index.html
```

PVs are created by posting them to the API server.

```
kubectl create -f examples/persistent-volumes/volumes/local-01.yaml
kubectl get pv

NAME        LABELS   CAPACITY      ACCESSMODES   STATUS      CLAIM
pv0001      map[]    10737418240   RWO           Available
```

## Requesting storage

Users of Kubernetes request persistent storage for their pods. They don't know how the underlying cluster is provisioned.
They just know they can rely on their claim to storage and can manage its lifecycle independently from the many pods that may use it.

Claims must be created in the same namespace as the pods that use them.

```
kubectl create -f examples/persistent-volumes/claims/claim-01.yaml
kubectl get pvc

NAME        LABELS   STATUS   VOLUME
myclaim-1   map[]

# A background process will attempt to match this claim to a volume.
# The eventual state of your claim will look something like this:

kubectl get pvc

NAME        LABELS   STATUS   VOLUME
myclaim-1   map[]    Bound    f5c3a89a-e50a-11e4-972f-80e6500a981e

kubectl get pv

NAME        LABELS   CAPACITY      ACCESSMODES   STATUS   CLAIM
pv0001      map[]    10737418240   RWO           Bound    myclaim-1 / 6bef4c40-e50b-11e4-972f-80e6500a981e
```

## Using your claim as a volume

Claims are used as volumes in pods. Kubernetes uses the claim to look up its bound PV. The PV is then exposed to the pod.

```
kubectl create -f examples/persistent-volumes/simpletest/pod.yaml

kubectl get pods

POD     IP           CONTAINER(S)   IMAGE(S)   HOST                  LABELS   STATUS    CREATED
mypod   172.17.0.2   myfrontend     nginx      127.0.0.1/127.0.0.1   <none>   Running   12 minutes

kubectl create -f examples/persistent-volumes/simpletest/service.json
kubectl get services

NAME              LABELS                                    SELECTOR            IP           PORT(S)
frontendservice   <none>                                    name=frontendhttp   10.0.0.241   3000/TCP
kubernetes        component=apiserver,provider=kubernetes   <none>              10.0.0.2     443/TCP
```

## Next steps

You should be able to query your service endpoint and see what content nginx is serving. A "forbidden" error might mean you
need to disable SELinux (run `setenforce 0`).

```
curl 10.0.0.241:3000
I love Kubernetes storage!
```

Hopefully this simple guide is enough to get you started with PersistentVolumes. If you have any questions, join
```#google-containers``` on IRC and ask!

Enjoy!


[]()
@@ -1,10 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim-1
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 3Gi
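This is the claim (`myclaim-1`) created earlier in the guide; to exercise it on its own, the same two commands apply:

```sh
$ kubectl create -f examples/persistent-volumes/claims/claim-01.yaml
$ kubectl get pvc
```
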
@@ -1,10 +0,0 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: myclaim-2
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 8Gi
@@ -1,17 +0,0 @@
{
  "kind": "PersistentVolumeClaim",
  "apiVersion": "v1",
  "metadata": {
    "name": "myclaim-3"
  }, "spec": {
    "accessModes": [
      "ReadWriteOnce",
      "ReadOnlyMany"
    ],
    "resources": {
      "requests": {
        "storage": "10G"
      }
    }
  }
}
@@ -1,10 +0,0 @@
{
  "kind": "Namespace",
  "apiVersion": "v1",
  "metadata": {
    "name": "myns",
    "labels": {
      "name": "development"
    }
  }
}
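A hedged sketch of how this namespace might be used: create it, then create your claims and pods with `--namespace=myns` so they land together. The file path here is a placeholder, not from the original guide.

```sh
# Substitute the path to wherever this namespace file is saved.
$ kubectl create -f namespace.json
$ kubectl get namespaces
$ kubectl create -f examples/persistent-volumes/claims/claim-01.yaml --namespace=myns
```
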
@@ -1,20 +0,0 @@
kind: Pod
apiVersion: v1
metadata:
  name: mypod
  labels:
    name: frontendhttp
spec:
  containers:
    - name: myfrontend
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/var/www/html"
          name: mypd
  volumes:
    - name: mypd
      persistentVolumeClaim:
        claimName: myclaim-1
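This appears to be the `simpletest/pod.yaml` the guide creates. A short sketch of creating it and inspecting the mounted claim; the `describe` step is an addition, not from the original guide:

```sh
$ kubectl create -f examples/persistent-volumes/simpletest/pod.yaml
$ kubectl describe pod mypod
```
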
@@ -1,19 +0,0 @@
{
  "kind": "Service",
  "apiVersion": "v1",
  "metadata": {
    "name": "frontendservice"
  },
  "spec": {
    "ports": [
      {
        "protocol": "TCP",
        "port": 3000,
        "targetPort": "http-server"
      }
    ],
    "selector": {
      "name": "frontendhttp"
    }
  }
}
@@ -1,13 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0003
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany
  gcePersistentDisk:
    pdName: "abc123"
    fsType: "ext4"
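As noted in the Provisioning section, the administrator must create the underlying GCE disk before this PV is usable. A hedged sketch: the disk name matches `pdName` above, while the size and zone are placeholders.

```sh
# Use the zone your cluster actually runs in.
$ gcloud compute disks create abc123 --size=10GB --zone=us-central1-a
```
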
@@ -1,13 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0001
  labels:
    type: local
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/data01"
@@ -1,14 +0,0 @@
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv0002
  labels:
    type: local
spec:
  capacity:
    storage: 8Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/data02"
  persistentVolumeReclaimPolicy: Recycle
Some files were not shown because too many files have changed in this diff.