diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md
index cbbc5b9b5bd..aec925710ea 100644
--- a/examples/guestbook/README.md
+++ b/examples/guestbook/README.md
@@ -37,44 +37,101 @@ Documentation for other releases can be found at
 This example shows how to build a simple, multi-tier web application using Kubernetes and [Docker](https://www.docker.com/).
 
 **Table of Contents**
+
-  - [Step Zero: Prerequisites](#step-zero-prerequisites)
-  - [Step One: Start up the redis master](#step-one-start-up-the-redis-master)
-  - [Optional Interlude](#optional-interlude)
-  - [Step Two: Fire up the redis master service](#step-two-fire-up-the-redis-master-service)
-  - [Finding a service](#finding-a-service)
-  - [Step Three: Fire up the replicated slave pods](#step-three-fire-up-the-replicated-slave-pods)
-  - [Step Four: Create the redis slave service](#step-four-create-the-redis-slave-service)
-  - [Step Five: Create the frontend replicated pods](#step-five-create-the-frontend-replicated-pods)
-  - [Step Six: Set up the guestbook frontend service](#step-six-set-up-the-guestbook-frontend-service)
-  - [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific)
-  - [Create the Frontend Service](#create-the-frontend-service)
-  - [Accessing the guestbook site externally](#accessing-the-guestbook-site-externally)
-  - [Google Compute Engine External Load Balancer Specifics](#gce-external-load-balancer-specifics)
-  - [Step Seven: Cleanup](#step-seven-cleanup)
-  - [Troubleshooting](#troubleshooting)
+  - [Guestbook Example](#guestbook-example)
+  - [Prerequisites](#prerequisites)
+  - [Quick Start](#quick-start)
+  - [Step One: Start up the redis master](#step-one-start-up-the-redis-master)
+  - [Define a replication controller](#define-a-replication-controller)
+  - [Define a service](#define-a-service)
+  - [Create a service](#create-a-service)
+  - [Finding a service](#finding-a-service)
+  - [Create a replication controller](#create-a-replication-controller)
+  - [Optional Interlude](#optional-interlude)
+  - [Step Two: Start up the redis slave](#step-two-start-up-the-redis-slave)
+  - [Step Three: Start up the guestbook frontend](#step-three-start-up-the-guestbook-frontend)
+  - [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific)
+  - [Step Four: Cleanup](#step-four-cleanup)
+  - [Troubleshooting](#troubleshooting)
+  - [Appendix: Accessing the guestbook site externally](#appendix-accessing-the-guestbook-site-externally)
+  - [Google Compute Engine External Load Balancer Specifics](#google-compute-engine-external-load-balancer-specifics)
+
+
 
 The example consists of:
 - A web frontend
 - A [redis](http://redis.io/) master (for storage), and a replicated set of redis 'slaves'.
 
-The web front end interacts with the redis master via javascript redis API calls.
+The web frontend interacts with the redis master via javascript redis API calls.
 
 **Note**:  If you are running this example on a [Google Container Engine](https://cloud.google.com/container-engine/) installation, see [this Container Engine guestbook walkthrough](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead. The basic concepts are the same, but the walkthrough is tailored to a Container Engine setup.
 
-### Step Zero: Prerequisites
+### Prerequisites
+
+This example requires a running Kubernetes cluster.
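+
+A quick way to confirm that `kubectl` can actually reach your cluster is to ask for its info (the address below is illustrative; yours will differ):
+
+```console
+$ kubectl cluster-info
+Kubernetes master is running at https://104.197.5.247
+```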
+See the [Getting Started guides](../../docs/getting-started-guides/) for how to get started, and follow the [Prerequisites](../../docs/user-guide/prereqs.md) to make sure your `kubectl` is set up and working. As noted above, if you have a Google Container Engine cluster set up, go [here](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead.
+
+### Quick Start
+
+This section shows the simplest way to get the example up and running. If you want to understand the details, skip this and read [the rest of the example](#step-one-start-up-the-redis-master).
+
+Start the guestbook with one command:
+
+```console
+$ kubectl create -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml
+service "redis-master" created
+replicationcontroller "redis-master" created
+service "redis-slave" created
+replicationcontroller "redis-slave" created
+service "frontend" created
+replicationcontroller "frontend" created
+```
+
+You can also start the guestbook by running:
+
+```console
+$ kubectl create -f examples/guestbook/
+```
+
+Then, list all your services:
+
+```console
+$ kubectl get services
+NAME           CLUSTER_IP    EXTERNAL_IP   PORT(S)    SELECTOR                             AGE
+frontend       10.0.93.211                 80/TCP     app=guestbook,tier=frontend          1h
+redis-master   10.0.136.3                  6379/TCP   app=redis,role=master,tier=backend   1h
+redis-slave    10.0.21.92                  6379/TCP   app=redis,role=slave,tier=backend    1h
+```
+
+Now you can access the guestbook on each node at the frontend service's `<Cluster-IP>:Port`, e.g. `10.0.93.211:80` in this guide. `<Cluster-IP>` is a cluster-internal IP. If you want to access the guestbook from outside of the cluster, add `type: NodePort` to the frontend service's `spec` field. Then you can access the guestbook at `<NodeIP>:NodePort` from outside of the cluster. On cloud providers which support external load balancers, setting the type field to "LoadBalancer" will provision a load balancer for your Service. For more ways to access the guestbook, see [Accessing services running on the cluster](../../docs/user-guide/accessing-the-cluster.md#accessing-services-running-on-the-cluster).
+
+Clean up the guestbook:
+
+```console
+$ kubectl delete -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml
+```
+
+or
+
+```console
+$ kubectl delete -f examples/guestbook/
+```
 
-This example requires a running Kubernetes cluster. See the [Getting Started guides](../../docs/getting-started-guides/) for how to get started. As noted above, if you have a Google Container Engine cluster set up, go [here](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead.
 
 ### Step One: Start up the redis master
 
+Before diving into the gory details, we also recommend reading the [Quick walkthrough](../../docs/user-guide/#quick-walkthrough), [Thorough walkthrough](../../docs/user-guide/#thorough-walkthrough) and [Concept guide](../../docs/user-guide/#concept-guide).
+
 **Note**: The redis master in this example is *not* highly available.  Making it highly available would be an interesting, but intricate exercise— redis doesn't actually support multi-master deployments at this point in time, so high availability would be a somewhat tricky thing to implement, and might involve periodic serialization to disk, and so on.
 
+#### Define a replication controller
+
 To start the redis master, use the file `examples/guestbook/redis-master-controller.yaml`, which describes a single [pod](../../docs/user-guide/pods.md) running a redis key-value server in a container.
 Although we have a single instance of our redis master, we are using a [replication controller](../../docs/user-guide/replication-controller.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the replication controller will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.)
 
+The file `examples/guestbook/redis-master-controller.yaml` defines the redis master replication controller:
+
 
 ```yaml
@@ -82,20 +139,36 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: redis-master
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
   labels:
-    name: redis-master
+    app: redis
+    role: master
+    tier: backend
 spec:
+  # this replicas value is default
+  # modify it according to your case
   replicas: 1
-  selector:
-    name: redis-master
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: master
+  #   tier: backend
   template:
     metadata:
       labels:
-        name: redis-master
+        app: redis
+        role: master
+        tier: backend
     spec:
       containers:
       - name: master
         image: redis
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
         ports:
         - containerPort: 6379
 ```
 
@@ -103,20 +176,101 @@ spec:
 
 [Download example](redis-master-controller.yaml?raw=true)
 
-Change to the `<kubernetes>/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running:
+#### Define a service
+
+A Kubernetes [service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/user-guide/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP.
+
+Services find the pods to load balance based on the pods' labels.
+The selector field of the service description determines which pods will receive the traffic sent to the service, and the `port` and `targetPort` information defines what port the service proxy will run at.
+
+The file `examples/guestbook/redis-master-service.yaml` defines the redis master service:
+
+
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master
+  labels:
+    app: redis
+    role: master
+    tier: backend
+spec:
+  ports:
+  # the port that this service should serve on
+  - port: 6379
+    targetPort: 6379
+  selector:
+    app: redis
+    role: master
+    tier: backend
+```
+
+[Download example](redis-master-service.yaml?raw=true)
+
+
+#### Create a service
+
+According to the [config best practices](../../docs/user-guide/config-best-practices.md), you should create a service before its corresponding replication controllers, so that the scheduler can spread the pods comprising the service. So we create the service first, by running:
+
+```console
+$ kubectl create -f examples/guestbook/redis-master-service.yaml
+service "redis-master" created
+```
+
+Then check the list of services, which should include the redis-master:
+
+```console
+$ kubectl get services
+NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                             AGE
+redis-master   10.0.136.3                 6379/TCP   app=redis,role=master,tier=backend   1h
+```
+
+This will cause all pods to see the redis master apparently running on `<Cluster-IP>:6379`. A service can map an incoming port to any `targetPort` in the backend pod.
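+
+If you want to double-check which backend pod IP(s) the service is currently routing to, you can also inspect its endpoints object. This is purely a sanity check; the endpoint address below is illustrative and will differ in your cluster:
+
+```console
+$ kubectl get endpoints redis-master
+NAME           ENDPOINTS
+redis-master   10.246.1.47:6379
+```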
+Once created, the service proxy on each node is configured to set up a proxy on the specified port (in this case port 6379).
+
+`targetPort` will default to `port` if it is omitted in the configuration. For simplicity's sake, we omit it in the following configurations.
+
+The traffic flow from slaves to masters can be described in two steps, like so:
+
+  - A *redis slave* will connect to "port" on the *redis master service*
+  - Traffic will be forwarded from the service "port" (on the service node) to the *targetPort* on the pod that the service listens to.
+
+For more details, please see [Connecting applications](../../docs/user-guide/connecting-applications.md).
+
+#### Finding a service
+
+Kubernetes supports two primary modes of finding a service— environment variables and DNS.
+
+The services in a Kubernetes cluster are discoverable inside other containers [via environment variables](../../docs/user-guide/services.md#environment-variables).
+
+An alternative is to use the [cluster's DNS service](../../docs/user-guide/services.md#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the service name.
+
+This example has been configured to use the DNS service by default.
+
+If your cluster does not have the DNS service enabled, then you can use environment variables by setting the
+`GET_HOSTS_FROM` env value in both
+`examples/guestbook/redis-slave-controller.yaml` and `examples/guestbook/frontend-controller.yaml`
+from `dns` to `env` before you start up the app.
+(However, this is unlikely to be necessary. You can check for the DNS service in the list of the cluster's services by
+running `kubectl --namespace=kube-system get rc`, and looking for a controller prefixed `kube-dns`.)
+Note that switching to env causes creation-order dependencies, since services need to be created before their clients that require env vars.
+
+#### Create a replication controller
+
+Second, create the redis master pod in your Kubernetes cluster by running:
 
 ```console
 $ kubectl create -f examples/guestbook/redis-master-controller.yaml
-replicationcontrollers/redis-master
+replicationcontroller "redis-master" created
 ```
 
-The `replicationcontrollers/redis-master` line is the expected response to this operation.
 You can see the replication controllers for your cluster by running:
 
 ```console
 $ kubectl get rc
-CONTROLLER     CONTAINER(S)   IMAGE(S)   SELECTOR            REPLICAS
-redis-master   master         redis      name=redis-master   1
+CONTROLLER     CONTAINER(S)   IMAGE(S)   SELECTOR                             REPLICAS
+redis-master   master         redis      app=redis,role=master,tier=backend   1
 ```
 
 Then, you can list the pods in the cluster, to verify that the master is running:
 
@@ -129,9 +283,9 @@ You'll see all pods in the cluster, including the redis master pod, and the stat
 The name of the redis master will look similar to that in the following list:
 
 ```console
-NAME                 READY     STATUS    RESTARTS   AGE
+NAME                 READY        STATUS     RESTARTS    AGE
 ...
-redis-master-dz33o   1/1       Running   0          2h
+redis-master-dz33o   1/1          Running    0           2h
 ```
 
 (Note that an initial `docker pull` to grab a container image may take a few minutes, depending on network conditions. A pod will be reported as `Pending` while its image is being downloaded.)
 
@@ -139,9 +293,11 @@ redis-master-dz33o   1/1       Running   0          2h
 
 `kubectl get pods` will show only the pods in the default [namespace](../../docs/user-guide/namespaces.md).
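+
+Because we attached labels to the pods, we can also filter the pod list by label. For example, the following should show only the redis master pod (the output shown is illustrative):
+
+```console
+$ kubectl get pods -l app=redis,role=master
+NAME                 READY     STATUS    RESTARTS   AGE
+redis-master-dz33o   1/1       Running   0          2h
+```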
 To see pods in all namespaces, run:
 
 ```
-kubectl get pods -o wide --all-namespaces=true
+kubectl get pods --all-namespaces
 ```
 
+For more details, please see [Configuring containers](../../docs/user-guide/configuring-containers.md) and [Deploying applications](../../docs/user-guide/deploying-applications.md).
+
 #### Optional Interlude
 
 You can get information about a pod, including the machine that it is running on, via `kubectl describe pods/<POD-NAME>`.  E.g., for the redis master, you should see something like the following (your pod name will be different):
 
 ```console
 $ kubectl describe pods/redis-master-dz33o
 Name:                           redis-master-dz33o
 Image(s):                       redis
 Node:                           kubernetes-minion-krxw/10.240.67.201
-Labels:                         name=redis-master
+Labels:                         app=redis,role=master,tier=backend
 Status:                         Running
 Replication Controllers:        redis-master (1/1 replicas created)
 Containers:
@@ -188,9 +344,9 @@ Then, you can look at the docker containers on the remote machine.  You should s
 
 ```console
 me@kubernetes-minion-krxw:~$ sudo docker ps
-CONTAINER ID        IMAGE               COMMAND                 CREATED              STATUS              PORTS                NAMES
+CONTAINER ID        IMAGE               COMMAND                 CREATED              STATUS              PORTS               NAMES
 ...
-0ffef9649265        redis:latest        "redis-server /etc/r"   About a minute ago   Up About a minute                       k8s_redis-master.767aef46_redis-master-controller-gb50a.default.api_4530d7b3-ae5d-11e4-bf77-42010af0d719_579ee964
+0ffef9649265        redis:latest        "/entrypoint.sh redi"   About a minute ago   Up About a minute                       k8s_master.869d22f3_redis-master-dz33o_default_1449a58a-5ead-11e5-a104-688f84ef8ef6_d74cb2b5
 ```
 
 If you want to see the logs for a given container, you can run:
 
 ```console
 $ docker logs <container_id>
 ```
 
-### Step Two: Fire up the redis master service
+### Step Two: Start up the redis slave
 
-A Kubernetes [service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/user-guide/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP.
+Now that the redis master is running, we can start up its 'read slaves'.
 
-Services find the pods to load balance based on the pods' labels.
-The pod that you created in [Step One](#step-one-start-up-the-redis-master) has the label `name=redis-master`.
-The selector field of the service description determines which pods will receive the traffic sent to the service, and the `port` and `targetPort` information defines what port the service proxy will run at.
+We'll define these as replicated pods as well, though this time— unlike for the redis master— we'll define the number of replicas to be 2.
+In Kubernetes, a replication controller is responsible for managing multiple instances of a replicated pod.  The replication controller will automatically launch new pods if the number of replicas falls below the specified number.
+(This particular replicated pod is a great one to test this with -- you can try killing the docker processes for your pods directly, then watch them come back online on a new node shortly thereafter.)
 
-The file `examples/guestbook/redis-master-service.yaml` defines the redis master service:
+Just like the master, we want to have a service to proxy connections to the redis slaves. In this case, in addition to discovery, the slave service will provide transparent load balancing to web app clients.
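+
+As an aside: once this service is created, pods started afterwards will be able to find it either through the DNS name `redis-slave` (the default for this example), or through Docker-links-style environment variables such as `REDIS_SLAVE_SERVICE_HOST` and `REDIS_SLAVE_SERVICE_PORT`. A quick, illustrative way to inspect those variables (the pod name here is a placeholder, and your values will differ):
+
+```console
+$ kubectl exec redis-master-dz33o env | grep REDIS_SLAVE
+REDIS_SLAVE_SERVICE_HOST=10.0.21.92
+REDIS_SLAVE_SERVICE_PORT=6379
+```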
-
+This time we put the service and RC into one [file](../../docs/user-guide/managing-deployments.md#organizing-resource-configurations). Grouping related objects together in a single file like this is often better than keeping them in separate files.
+The specification for the slaves is in `examples/guestbook/all-in-one/redis-slave.yaml`:
+
+
 ```yaml
 apiVersion: v1
 kind: Service
 metadata:
-  name: redis-master
+  name: redis-slave
   labels:
-    name: redis-master
+    app: redis
+    role: slave
+    tier: backend
 spec:
   ports:
   # the port that this service should serve on
   - port: 6379
-    targetPort: 6379
   selector:
-    name: redis-master
-```
-
-[Download example](redis-master-service.yaml?raw=true)
-
-
-Create the service by running:
-
-```console
-$ kubectl create -f examples/guestbook/redis-master-service.yaml
-services/redis-master
-```
-
-Then check the list of services, which should include the redis-master:
-
-```console
-$ kubectl get services
-NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
-redis-master   10.0.136.3                 6379/TCP   app=redis,role=master   1h
-...
-```
-
-This will cause all pods to see the redis master apparently running on <Cluster-IP>:6379. A service can map an incoming port to any `targetPort` in the backend pod. Once created, the service proxy on each node is configured to set up a proxy on the specified port (in this case port 6379).
-
-`targetPort` will default to `port` if it is omitted in the configuration. For simplicity's sake, we omit it in the following configurations.
-
-The traffic flow from slaves to masters can be described in two steps, like so:
-
-  - A *redis slave* will connect to "port" on the *redis master service*
-  - Traffic will be forwarded from the service "port" (on the service node) to the *targetPort* on the pod that the service listens to.
-
-#### Finding a service
-
-Kubernetes supports two primary modes of finding a service— environment variables and DNS.
-
-The services in a Kubernetes cluster are discoverable inside other containers [via environment variables](../../docs/user-guide/services.md#environment-variables).
-
-An alternative is to use the [cluster's DNS service](../../docs/user-guide/services.md#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the service name.
-
-This example has been configured to use the DNS service by default.
-
-If your cluster does not have the DNS service enabled, then you can use environment variables by setting the
-`GET_HOSTS_FROM` env value in both
-`examples/guestbook/redis-slave-controller.yaml` and `examples/guestbook/frontend-controller.yaml`
-from `dns` to `env` before you start up the app.
-(However, this is unlikely to be necessary. You can check for the DNS service in the list of the clusters' services by
-running `kubectl --namespace=kube-system get rc`, and looking for a controller prefixed `kube-dns`.)
-
-
-### Step Three: Fire up the replicated slave pods
-
-Now that the redis master is running, we can start up its 'read slaves'.
-
-We'll define these as replicated pods as well, though this time— unlike for the redis master— we'll define the number of replicas to be 2.
-In Kubernetes, a replication controller is responsible for managing multiple instances of a replicated pod.  The replication controller will automatically launch new pods if the number of replicas falls below the specified number.
-(This particular replicated pod is a great one to test this with -- you can try killing the docker processes for your pods directly, then watch them come back online on a new node shortly thereafter.)
-
-To create the replicated pod, use the file `examples/guestbook/redis-slave-controller.yaml`, which looks like this:
-
-
-```yaml
+    app: redis
+    role: slave
+    tier: backend
+---
 apiVersion: v1
 kind: ReplicationController
 metadata:
   name: redis-slave
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
   labels:
-    name: redis-slave
+    app: redis
+    role: slave
+    tier: backend
 spec:
+  # this replicas value is default
+  # modify it according to your case
   replicas: 2
-  selector:
-    name: redis-slave
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: slave
+  #   tier: backend
   template:
     metadata:
       labels:
-        name: redis-slave
+        app: redis
+        role: slave
+        tier: backend
     spec:
       containers:
-      - name: worker
+      - name: slave
         image: gcr.io/google_samples/gb-redisslave:v1
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
         env:
         - name: GET_HOSTS_FROM
           value: dns
@@ -316,104 +434,102 @@ spec:
         - containerPort: 6379
 ```
 
-[Download example](redis-slave-controller.yaml?raw=true)
-
+[Download example](all-in-one/redis-slave.yaml?raw=true)
+
-and create the replication controller by running:
+This time the selector for the service is `app=redis,role=slave,tier=backend`, because that identifies the pods running redis slaves. It is generally helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "app=redis,role=slave,tier=backend"` command. For more on using labels, see [using-labels-effectively](../../docs/user-guide/managing-deployments.md#using-labels-effectively).
+
+Now that you have created the specification, create it in your cluster by running:
 
 ```console
-$ kubectl create -f examples/guestbook/redis-slave-controller.yaml
-replicationcontrollers/redis-slave
+$ kubectl create -f examples/guestbook/all-in-one/redis-slave.yaml
+service "redis-slave" created
+replicationcontroller "redis-slave" created
+
+$ kubectl get services
+NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                             AGE
+redis-master   10.0.136.3                 6379/TCP   app=redis,role=master,tier=backend   1h
+redis-slave    10.0.21.92                 6379/TCP   app=redis,role=slave,tier=backend    1h
 
 $ kubectl get rc
-CONTROLLER     CONTAINER(S)   IMAGE(S)                                 SELECTOR            REPLICAS
-redis-master   master         redis                                    name=redis-master   1
-redis-slave    slave          gcr.io/google_samples/gb-redisslave:v1   name=redis-slave    2
+CONTROLLER     CONTAINER(S)   IMAGE(S)                                 SELECTOR                             REPLICAS
+redis-master   master         redis                                    app=redis,role=master,tier=backend   1
+redis-slave    slave          gcr.io/google_samples/gb-redisslave:v1   app=redis,role=slave,tier=backend    2
 ```
 
 Once the replication controller is up, you can list the pods in the cluster, to verify that the master and slaves are running. You should see a list that includes something like the following:
 
 ```console
 $ kubectl get pods
-NAME                 READY     STATUS    RESTARTS   AGE
+NAME                 READY        STATUS     RESTARTS    AGE
 ...
-redis-master-dz33o   1/1       Running   0          2h
-redis-slave-35mer    1/1       Running   0          2h
-redis-slave-iqkhy    1/1       Running   0          2h
+redis-master-dz33o   1/1          Running    0           2h
+redis-slave-35mer    1/1          Running    0           2h
+redis-slave-iqkhy    1/1          Running    0           2h
 ```
 
-You should see a single redis master pod and two redis slave pods. As mentioned above, you can get more information about any pod with: `kubectl describe pods/<POD-NAME>`.
+You should see a single redis master pod and two redis slave pods. As mentioned above, you can get more information about any pod with: `kubectl describe pods/<POD-NAME>`. You can also view the resources in the [kube-ui](../../docs/user-guide/ui.md).
 
-### Step Four: Create the redis slave service
+### Step Three: Start up the guestbook frontend
 
-Just like the master, we want to have a service to proxy connections to the redis slaves. In this case, in addition to discovery, the slave service will provide transparent load balancing to web app clients.
+A frontend pod is a simple PHP server that is configured to talk to either the slave or master services, depending on whether the client request is a read or a write. It exposes a simple AJAX interface, and serves an Angular-based UX.
+Again we'll create a set of replicated frontend pods instantiated by a replication controller— this time, with three replicas.
 
-The service specification for the slaves is in `examples/guestbook/redis-slave-service.yaml`:
+As with the other pods, we now want to create a service to group the frontend pods.
+The RC and service are described in the file `examples/guestbook/all-in-one/frontend.yaml`:
 
-
+
 
 ```yaml
 apiVersion: v1
 kind: Service
 metadata:
-  name: redis-slave
+  name: frontend
   labels:
-    name: redis-slave
+    app: guestbook
+    tier: frontend
 spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
+  # type: LoadBalancer
   ports:
   # the port that this service should serve on
-  - port: 6379
+  - port: 80
   selector:
-    name: redis-slave
-```
-
-[Download example](redis-slave-service.yaml?raw=true)
-
-
-This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command.
-
-Now that you have created the service specification, create it in your cluster by running:
-
-```console
-$ kubectl create -f examples/guestbook/redis-slave-service.yaml
-services/redis-slave
-
-$ kubectl get services
-NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
-redis-master   10.0.136.3                 6379/TCP   app=redis,role=master   1h
-redis-slave    10.0.21.92                 6379/TCP   app-redis,role=slave    1h
-```
-
-### Step Five: Create the frontend replicated pods
-
-
-
-A frontend pod is a simple PHP server that is configured to talk to either the slave or master services, depending on whether the client request is a read or a write. It exposes a simple AJAX interface, and serves an Angular-based UX.
-Again we'll create a set of replicated frontend pods instantiated by a replication controller— this time, with three replicas.
-The pod is described in the file `examples/guestbook/frontend-controller.yaml`:
-
-
-```yaml
+    app: guestbook
+    tier: frontend
+---
 apiVersion: v1
 kind: ReplicationController
 metadata:
   name: frontend
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
   labels:
-    name: frontend
+    app: guestbook
+    tier: frontend
 spec:
+  # this replicas value is default
+  # modify it according to your case
   replicas: 3
-  selector:
-    name: frontend
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: guestbook
+  #   tier: frontend
   template:
     metadata:
       labels:
-        name: frontend
+        app: guestbook
+        tier: frontend
     spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
@@ -426,44 +542,61 @@ spec:
        - containerPort: 80
 ```
 
-[Download example](frontend-controller.yaml?raw=true)
-
+[Download example](all-in-one/frontend.yaml?raw=true)
+
-Using this file, you can turn up your frontend with:
+#### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)
+
+For supported cloud providers, such as Google Compute Engine or Google Container Engine, you can specify to use an external load balancer
+in the service `spec`, to expose the service onto an external load balancer IP.
+To do this, uncomment the `type: LoadBalancer` line in the `examples/guestbook/all-in-one/frontend.yaml` file before you start the service.
+
+[See the appendix below](#appendix-accessing-the-guestbook-site-externally) on accessing the guestbook site externally for more details.
+
+Create the service and replication controller like this:
 
 ```console
-$ kubectl create -f examples/guestbook/frontend-controller.yaml
-replicationcontrollers/frontend
+$ kubectl create -f examples/guestbook/all-in-one/frontend.yaml
+service "frontend" created
+replicationcontroller "frontend" created
 ```
 
-Then, list all your replication controllers:
+Then, list all your services again:
+
+```console
+$ kubectl get services
+NAME           CLUSTER_IP    EXTERNAL_IP   PORT(S)    SELECTOR                             AGE
+frontend       10.0.93.211                 80/TCP     app=guestbook,tier=frontend          1h
+redis-master   10.0.136.3                  6379/TCP   app=redis,role=master,tier=backend   1h
+redis-slave    10.0.21.92                  6379/TCP   app=redis,role=slave,tier=backend    1h
+```
+
+Also list all your replication controllers:
 
 ```console
 $ kubectl get rc
-CONTROLLER     CONTAINER(S)   IMAGE(S)                                    SELECTOR            REPLICAS
-frontend       php-redis      kubernetes/example-guestbook-php-redis:v3   name=frontend       3
-redis-master   master         redis                                       name=redis-master   1
-redis-slave    slave          gcr.io/google_samples/gb-redisslave:v1      name=redis-slave    2
+CONTROLLER     CONTAINER(S)   IMAGE(S)                                  SELECTOR                             REPLICAS
+frontend       php-redis      gcr.io/google_samples/gb-frontend:v3      app=guestbook,tier=frontend          3
+redis-master   master         redis                                     app=redis,role=master,tier=backend   1
+redis-slave    slave          gcr.io/google_samples/gb-redisslave:v1    app=redis,role=slave,tier=backend    2
 ```
 
-Once it's up (again, it may take up to thirty seconds to create the pods) you can list the pods in the cluster, to verify that the master, slaves and frontends are all running. You should see a list that includes something like the following:
+Once it's up (again, it may take up to thirty seconds to create the pods) you can list the pods with the specified labels in the cluster, to verify that the master, slaves and frontends are all running. You should see a list of pods, with their `tier` label shown, like the following:
 
 ```console
-$ kubectl get pods
-NAME                 READY     STATUS    RESTARTS   AGE
-...
-frontend-4o11g       1/1       Running   0          2h
-frontend-u9aq6       1/1       Running   0          2h
-frontend-yga1l       1/1       Running   0          2h
-...
-redis-master-dz33o   1/1       Running   0          2h
-redis-slave-35mer    1/1       Running   0          2h
-redis-slave-iqkhy    1/1       Running   0          2h
+$ kubectl get pods -L tier
+NAME                 READY     STATUS    RESTARTS   AGE       TIER
+frontend-4o11g       1/1       Running   0          2h        frontend
+frontend-u9aq6       1/1       Running   0          2h        frontend
+frontend-yga1l       1/1       Running   0          2h        frontend
+redis-master-dz33o   1/1       Running   0          2h        backend
+redis-slave-35mer    1/1       Running   0          2h        backend
+redis-slave-iqkhy    1/1       Running   0          2h        backend
 ```
 
 You should see a single redis master pod, two redis slaves, and three frontend pods.
 
-The code for the PHP server that the frontends are running is in `guestbook/php-redis/guestbook.php`. It looks like this:
+The code for the PHP server that the frontends are running is in `examples/guestbook/php-redis/guestbook.php`. It looks like this:
 
 ```php
-
-```yaml
-apiVersion: v1
-kind: Service
-metadata:
-  name: frontend
-  labels:
-    name: frontend
-spec:
-  # if your cluster supports it, uncomment the following to automatically create
-  # an external load-balanced IP for the frontend service.
-  # type: LoadBalancer
-  ports:
-  # the port that this service should serve on
-  - port: 80
-  selector:
-    name: frontend
-```
-
-[Download example](frontend-service.yaml?raw=true)
-
-
-#### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)
-
-For supported cloud providers, such as Google Compute Engine or Google Container Engine, you can specify to use an external load balancer
-in the service `spec`, to expose the service onto an external load balancer IP.
-To do this, uncomment the `type: LoadBalancer` line in the `frontend-service.yaml` file before you start the service.
-
-[See the section below](#accessing-the-guestbook-site-externally) on accessing the guestbook site externally for more details.
-
-
-#### Create the Frontend Service ####
-
-Create the service like this:
+### Step Four: Cleanup
+
+If you are in a live kubernetes cluster, you can just kill the pods by deleting the replication controllers and services. Using labels to select the resources to delete is an easy way to do this in one command.
 
 ```console
-$ kubectl create -f examples/guestbook/frontend-service.yaml
-services/frontend
-```
-
-Then, list all your services again:
-
-```console
-$ kubectl get services
-NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
-frontend       10.0.93.211                80/TCP     name=frontend           1h
-redis-master   10.0.136.3                 6379/TCP   app=redis,role=master   1h
-redis-slave    10.0.21.92                 6379/TCP   app-redis,role=slave    1h
-```
-
-
-#### Accessing the guestbook site externally
-
-
-
-You'll want to set up your guestbook service so that it can be accessed from outside of the internal Kubernetes network. Above, we introduced one way to do that, using the `type: LoadBalancer` spec.
-
-More generally, Kubernetes supports two ways of exposing a service onto an external IP address: `NodePort`s and `LoadBalancer`s , as described [here](../../docs/user-guide/services.md#publishing-services---service-types).
-
-If the `LoadBalancer` specification is used, it can take a short period for an external IP to show up in `kubectl get services` output, but you should shortly see it listed as well, e.g.
-like this:
-
-```console
-$ kubectl get services
-NAME           CLUSTER_IP    EXTERNAL_IP      PORT(S)    SELECTOR                AGE
-frontend       10.0.93.211   130.211.188.51   80/TCP     name=frontend           1h
-redis-master   10.0.136.3                     6379/TCP   app=redis,role=master   1h
-redis-slave    10.0.21.92                     6379/TCP   app-redis,role=slave    1h
-```
-
-Once you've exposed the service to an external IP, visit the IP to see your guestbook in action. E.g., `http://130.211.188.51:80` in the example above.
-
-You should see a web page that looks something like this (without the messages). Try adding some entries to it!
-
-
-
-If you are more advanced in the ops arena, you can also manually get the service IP from looking at the output of `kubectl get pods,services`, and modify your firewall using standard tools and services (firewalld, iptables, selinux) which you are already familiar with.
-
-##### Google Compute Engine External Load Balancer Specifics
-
-In Google Compute Engine, `kubectl` automatically creates forwarding rule for services with `LoadBalancer`.
-
-You can list the forwarding rules like this. The forwarding rule also indicates the external IP.
-
-```console
-$ gcloud compute forwarding-rules list
-NAME       REGION        IP_ADDRESS       IP_PROTOCOL   TARGET
-frontend   us-central1   130.211.188.51   TCP           us-central1/targetPools/frontend
-```
-
-In Google Compute Engine, you also may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion` (replace with your tags as appropriate):
-
-```console
-$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-minion kubernetes-minion-80
-```
-
-For Google Compute Engine details about limiting traffic to specific sources, see the [Google Compute Engine firewall documentation][gce-firewall-docs].
-
-[cloud-console]: https://console.developer.google.com
-[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls
-
-### Step Seven: Cleanup
-
-If you are in a live kubernetes cluster, you can just kill the pods by deleting the replication controllers and the services. Using labels to select the resources to stop or delete is an easy way to do this in one command.
-
-```console
-kubectl delete rc -l "name in (redis-master, redis-slave, frontend)"
-kubectl delete service -l "name in (redis-master, redis-slave, frontend)"
+$ kubectl delete rc -l "app in (redis, guestbook)"
+$ kubectl delete service -l "app in (redis, guestbook)"
 ```
 
 To completely tear down a Kubernetes cluster, if you ran this from source, you can use:
 
@@ -642,6 +668,56 @@ If you are having trouble bringing up your guestbook app, double check that your
 
 Then, see the [troubleshooting documentation](../../docs/troubleshooting.md) for a further list of common issues and how you can diagnose them.
 
+
+### Appendix: Accessing the guestbook site externally
+
+You'll want to set up your guestbook service so that it can be accessed from outside of the internal Kubernetes network. Above, we introduced one way to do that, using the `type: LoadBalancer` spec.
+
+More generally, Kubernetes supports two ways of exposing a service onto an external IP address: `NodePort`s and `LoadBalancer`s, as described [here](../../docs/user-guide/services.md#publishing-services---service-types).
+
+If the `LoadBalancer` specification is used, it can take a short period for an external IP to show up in `kubectl get services` output, but you should shortly see it listed as well, e.g.
+like this:
+
+```console
+$ kubectl get services
+NAME           CLUSTER_IP    EXTERNAL_IP      PORT(S)    SELECTOR                             AGE
+frontend       10.0.93.211   130.211.188.51   80/TCP     app=guestbook,tier=frontend          1h
+redis-master   10.0.136.3                     6379/TCP   app=redis,role=master,tier=backend   1h
+redis-slave    10.0.21.92                     6379/TCP   app=redis,role=slave,tier=backend    1h
+```
+
+Once you've exposed the service to an external IP, visit the IP to see your guestbook in action. E.g., `http://130.211.188.51:80` in the example above.
+
+You should see a web page that looks something like this (without the messages). Try adding some entries to it!
+
+
+
+If you are more advanced in the ops arena, you can also manually get the service IP from looking at the output of `kubectl get pods,services`, and modify your firewall using standard tools and services (firewalld, iptables, selinux) which you are already familiar with.
+
+#### Google Compute Engine External Load Balancer Specifics
+
+In Google Compute Engine, Kubernetes automatically creates a forwarding rule for services with `LoadBalancer`.
+
+You can list the forwarding rules like this. The forwarding rule also indicates the external IP.
+
+```console
+$ gcloud compute forwarding-rules list
+NAME       REGION        IP_ADDRESS       IP_PROTOCOL   TARGET
+frontend   us-central1   130.211.188.51   TCP           us-central1/targetPools/frontend
+```
+
+In Google Compute Engine, you also may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-minion` (replace with your tags as appropriate):
+
+```console
+$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-minion kubernetes-minion-80
+```
+
+For GCE Kubernetes startup details, see the [Getting started on Google Compute Engine](../../docs/getting-started-guides/gce.md) guide.
+
+For Google Compute Engine details about limiting traffic to specific sources, see the [Google Compute Engine firewall documentation][gce-firewall-docs].
+
+[cloud-console]: https://console.developer.google.com
+[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls
+
 
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/guestbook/README.md?pixel)]()
diff --git a/examples/guestbook/all-in-one/frontend.yaml b/examples/guestbook/all-in-one/frontend.yaml
new file mode 100644
index 00000000000..780f8da1a09
--- /dev/null
+++ b/examples/guestbook/all-in-one/frontend.yaml
@@ -0,0 +1,59 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: frontend
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
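+  # (note: 'type: NodePort' is another option here; it exposes the service on a
+  # port on every cluster node, without requiring a cloud load balancer)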
+  # type: LoadBalancer
+  ports:
+  # the port that this service should serve on
+  - port: 80
+  selector:
+    app: guestbook
+    tier: frontend
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: frontend
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # this replicas value is default
+  # modify it according to your case
+  replicas: 3
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: guestbook
+  #   tier: frontend
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      containers:
+      - name: php-redis
+        image: gcr.io/google_samples/gb-frontend:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below.
+          # value: env
+        ports:
+        - containerPort: 80
diff --git a/examples/guestbook/all-in-one/guestbook-all-in-one.yaml b/examples/guestbook/all-in-one/guestbook-all-in-one.yaml
new file mode 100644
index 00000000000..44b3ad926bd
--- /dev/null
+++ b/examples/guestbook/all-in-one/guestbook-all-in-one.yaml
@@ -0,0 +1,176 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis-master
+  labels:
+    app: redis
+    tier: backend
+    role: master
+spec:
+  ports:
+  # the port that this service should serve on
+  - port: 6379
+    targetPort: 6379
+  selector:
+    app: redis
+    tier: backend
+    role: master
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: redis-master
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
+  labels:
+    app: redis
+    role: master
+    tier: backend
+spec:
+  # this replicas value is default
+  # modify it according to your case
+  replicas: 1
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: master
+  #   tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: master
+        tier: backend
+    spec:
+      containers:
+      - name: master
+        image: redis
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        ports:
+        - containerPort: 6379
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis-slave
+  labels:
+    app: redis
+    tier: backend
+    role: slave
+spec:
+  ports:
+  # the port that this service should serve on
+  - port: 6379
+  selector:
+    app: redis
+    tier: backend
+    role: slave
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: redis-slave
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  # this replicas value is default
+  # modify it according to your case
+  replicas: 2
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: slave
+  #   tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: slave
+        tier: backend
+    spec:
+      containers:
+      - name: slave
+        image: gcr.io/google_samples/gb-redisslave:v1
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access an environment variable to find the master
+          # service's host, comment out the 'value: dns' line above, and
+          # uncomment the line below.
+          # value: env
+        ports:
+        - containerPort: 6379
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: frontend
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # if your cluster supports it, uncomment the following to automatically create
+  # an external load-balanced IP for the frontend service.
+  # type: LoadBalancer
+  ports:
+  # the port that this service should serve on
+  - port: 80
+  selector:
+    app: guestbook
+    tier: frontend
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: frontend
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
+  labels:
+    app: guestbook
+    tier: frontend
+spec:
+  # this replicas value is default
+  # modify it according to your case
+  replicas: 3
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: guestbook
+  #   tier: frontend
+  template:
+    metadata:
+      labels:
+        app: guestbook
+        tier: frontend
+    spec:
+      containers:
+      - name: php-redis
+        image: gcr.io/google_samples/gb-frontend:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access environment variables to find service host
+          # info, comment out the 'value: dns' line above, and uncomment the
+          # line below.
+          # value: env
+        ports:
+        - containerPort: 80
diff --git a/examples/guestbook/all-in-one/redis-slave.yaml b/examples/guestbook/all-in-one/redis-slave.yaml
new file mode 100644
index 00000000000..de2d08e1385
--- /dev/null
+++ b/examples/guestbook/all-in-one/redis-slave.yaml
@@ -0,0 +1,61 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: redis-slave
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  ports:
+  # the port that this service should serve on
+  - port: 6379
+  selector:
+    app: redis
+    role: slave
+    tier: backend
+---
+apiVersion: v1
+kind: ReplicationController
+metadata:
+  name: redis-slave
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
+  labels:
+    app: redis
+    role: slave
+    tier: backend
+spec:
+  # this replicas value is default
+  # modify it according to your case
+  replicas: 2
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: slave
+  #   tier: backend
+  template:
+    metadata:
+      labels:
+        app: redis
+        role: slave
+        tier: backend
+    spec:
+      containers:
+      - name: slave
+        image: gcr.io/google_samples/gb-redisslave:v1
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
+        env:
+        - name: GET_HOSTS_FROM
+          value: dns
+          # If your cluster config does not include a dns service, then to
+          # instead access an environment variable to find the master
+          # service's host, comment out the 'value: dns' line above, and
+          # uncomment the line below.
+          # value: env
+        ports:
+        - containerPort: 6379
diff --git a/examples/guestbook/frontend-controller.yaml b/examples/guestbook/frontend-controller.yaml
index 1a48f95b346..2eb08ce00b0 100644
--- a/examples/guestbook/frontend-controller.yaml
+++ b/examples/guestbook/frontend-controller.yaml
@@ -2,20 +2,33 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: frontend
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
   labels:
-    name: frontend
+    app: guestbook
+    tier: frontend
 spec:
+  # this replicas value is default
+  # modify it according to your case
   replicas: 3
-  selector:
-    name: frontend
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: guestbook
+  #   tier: frontend
   template:
     metadata:
       labels:
-        name: frontend
+        app: guestbook
+        tier: frontend
     spec:
       containers:
       - name: php-redis
         image: gcr.io/google_samples/gb-frontend:v3
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
         env:
         - name: GET_HOSTS_FROM
           value: dns
diff --git a/examples/guestbook/frontend-service.yaml b/examples/guestbook/frontend-service.yaml
index 22f0273b095..72ea61327a7 100644
--- a/examples/guestbook/frontend-service.yaml
+++ b/examples/guestbook/frontend-service.yaml
@@ -3,13 +3,15 @@ kind: Service
 metadata:
   name: frontend
   labels:
-    name: frontend
+    app: guestbook
+    tier: frontend
 spec:
   # if your cluster supports it, uncomment the following to automatically create
   # an external load-balanced IP for the frontend service.
   # type: LoadBalancer
   ports:
   # the port that this service should serve on
-  - port: 80
+  - port: 80
   selector:
-    name: frontend
+    app: guestbook
+    tier: frontend
diff --git a/examples/guestbook/redis-master-controller.yaml b/examples/guestbook/redis-master-controller.yaml
index 2eebf2625c3..ed83fad584a 100644
--- a/examples/guestbook/redis-master-controller.yaml
+++ b/examples/guestbook/redis-master-controller.yaml
@@ -2,19 +2,35 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: redis-master
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
   labels:
-    name: redis-master
+    app: redis
+    role: master
+    tier: backend
 spec:
+  # this replicas value is default
+  # modify it according to your case
   replicas: 1
-  selector:
-    name: redis-master
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: master
+  #   tier: backend
   template:
     metadata:
      labels:
-        name: redis-master
+        app: redis
+        role: master
+        tier: backend
    spec:
      containers:
      - name: master
        image: redis
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
        ports:
        - containerPort: 6379
diff --git a/examples/guestbook/redis-master-service.yaml b/examples/guestbook/redis-master-service.yaml
index b200cd6ec01..a9db7031280 100644
--- a/examples/guestbook/redis-master-service.yaml
+++ b/examples/guestbook/redis-master-service.yaml
@@ -3,11 +3,15 @@ kind: Service
 metadata:
   name: redis-master
   labels:
-    name: redis-master
+    app: redis
+    role: master
+    tier: backend
 spec:
   ports:
   # the port that this service should serve on
   - port: 6379
     targetPort: 6379
   selector:
-    name: redis-master
+    app: redis
+    role: master
+    tier: backend
diff --git a/examples/guestbook/redis-slave-controller.yaml b/examples/guestbook/redis-slave-controller.yaml
index 6e5dde18aa7..1d54a828043 100644
--- a/examples/guestbook/redis-slave-controller.yaml
+++ b/examples/guestbook/redis-slave-controller.yaml
@@ -2,20 +2,36 @@ apiVersion: v1
 kind: ReplicationController
 metadata:
   name: redis-slave
+  # these labels can be applied automatically
+  # from the labels in the pod template if not set
   labels:
-    name: redis-slave
+    app: redis
+    role: slave
+    tier: backend
 spec:
+  # this replicas value is default
+  # modify it according to your case
   replicas: 2
-  selector:
-    name: redis-slave
+  # selector can be applied automatically
+  # from the labels in the pod template if not set
+  # selector:
+  #   app: redis
+  #   role: slave
+  #   tier: backend
   template:
     metadata:
      labels:
-        name: redis-slave
+        app: redis
+        role: slave
+        tier: backend
    spec:
      containers:
-      - name: worker
+      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
+        resources:
+          requests:
+            cpu: 100m
+            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
diff --git a/examples/guestbook/redis-slave-service.yaml b/examples/guestbook/redis-slave-service.yaml
index 1acc9def301..e1f04017140 100644
--- a/examples/guestbook/redis-slave-service.yaml
+++ b/examples/guestbook/redis-slave-service.yaml
@@ -3,10 +3,14 @@ kind: Service
 metadata:
   name: redis-slave
   labels:
-    name: redis-slave
+    app: redis
+    role: slave
+    tier: backend
 spec:
   ports:
   # the port that this service should serve on
   - port: 6379
   selector:
-    name: redis-slave
+    app: redis
+    role: slave
+    tier: backend
diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go
index 4a4ab2f8f21..1b724c7e623 100644
--- a/test/e2e/kubectl.go
+++ b/test/e2e/kubectl.go
@@ -56,9 +56,9 @@ const (
 	kittenImage          = "gcr.io/google_containers/update-demo:kitten"
 	updateDemoSelector   = "name=update-demo"
 	updateDemoContainer  = "update-demo"
-	frontendSelector     = "name=frontend"
-	redisMasterSelector  = "name=redis-master"
-	redisSlaveSelector   = "name=redis-slave"
+	frontendSelector     = "app=guestbook,tier=frontend"
+	redisMasterSelector  = "app=redis,role=master"
+	redisSlaveSelector   = "app=redis,role=slave"
 	goproxyContainer     = "goproxy"
 	goproxyPodSelector   = "name=goproxy"
 	netexecContainer     = "netexec"