Merge pull request #8596 from andronat/fix_8319
Kubectl command renaming (run-container to run and resize to scale)
@@ -442,9 +442,9 @@ _kubectl_rolling-update()
must_have_one_noun=()
}

_kubectl_resize()
_kubectl_scale()
{
last_command="kubectl_resize"
last_command="kubectl_scale"
commands=()

flags=()

@@ -537,9 +537,9 @@ _kubectl_proxy()
must_have_one_noun=()
}

_kubectl_run-container()
_kubectl_run()
{
last_command="kubectl_run-container"
last_command="kubectl_run"
commands=()

flags=()

@@ -897,11 +897,11 @@ _kubectl()
commands+=("namespace")
commands+=("logs")
commands+=("rolling-update")
commands+=("resize")
commands+=("scale")
commands+=("exec")
commands+=("port-forward")
commands+=("proxy")
commands+=("run-container")
commands+=("run")
commands+=("stop")
commands+=("expose")
commands+=("label")

@@ -20,9 +20,9 @@ kubectl_logs.md
kubectl_namespace.md
kubectl_port-forward.md
kubectl_proxy.md
kubectl_resize.md
kubectl_rolling-update.md
kubectl_run-container.md
kubectl_run.md
kubectl_scale.md
kubectl_stop.md
kubectl_update.md
kubectl_version.md

@@ -189,7 +189,7 @@ These are verbs which change the fundamental type of data returned (watch returns

Two additional verbs `redirect` and `proxy` provide access to cluster resources as described in [accessing-the-cluster.md](accessing-the-cluster.md).

When resources wish to expose alternative actions that are closely coupled to a single resource, they should do so using new sub-resources. An example is allowing automated processes to update the "status" field of a Pod. The `/pods` endpoint only allows updates to "metadata" and "spec", since those reflect end-user intent. An automated process should be able to modify status for users to see by sending an updated Pod kind to the server to the "/pods/<name>/status" endpoint - the alternate endpoint allows different rules to be applied to the update, and access to be appropriately restricted. Likewise, some actions like "stop" or "resize" are best represented as REST sub-resources that are POSTed to. The POST action may require a simple kind to be provided if the action requires parameters, or function without a request body.
When resources wish to expose alternative actions that are closely coupled to a single resource, they should do so using new sub-resources. An example is allowing automated processes to update the "status" field of a Pod. The `/pods` endpoint only allows updates to "metadata" and "spec", since those reflect end-user intent. An automated process should be able to modify status for users to see by sending an updated Pod kind to the server to the "/pods/<name>/status" endpoint - the alternate endpoint allows different rules to be applied to the update, and access to be appropriately restricted. Likewise, some actions like "stop" or "scale" are best represented as REST sub-resources that are POSTed to. The POST action may require a simple kind to be provided if the action requires parameters, or function without a request body.

TODO: more documentation of Watch
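Reviewer note: the API-conventions paragraph above is the rationale for this rename; a "scale"-style action is meant to live on a sub-resource that clients POST to. A minimal sketch of such a call, where the URL path and JSON payload are purely hypothetical and not defined by this commit:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical sub-resource URL and payload, shown only to illustrate the
	// "POST to a sub-resource" pattern described in the conventions above.
	url := "http://localhost:8080/api/v1beta3/namespaces/default/replicationcontrollers/my-nginx/scale"
	body := bytes.NewBufferString(`{"replicas": 3}`)

	resp, err := http.Post(url, "application/json", body)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("server responded with:", resp.Status)
}
```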
@@ -47,7 +47,7 @@ See also issues with the following labels:
1. A fairly general-purpose way to specify fields on the command line during creation and update, not just from a config file
1. Extensible API-based generator framework (i.e. invoke generators via an API/URL rather than building them into kubectl), so that complex client libraries don’t need to be rewritten in multiple languages, and so that the abstractions are available through all interfaces: API, CLI, UI, logs, ... [#5280](https://github.com/GoogleCloudPlatform/kubernetes/issues/5280)
1. Need schema registry, and some way to invoke generator (e.g., using a container)
1. Convert run-container to API-based generator
1. Convert run command to API-based generator
1. Transformation framework
1. More intelligent defaulting of fields (e.g., [#2643](https://github.com/GoogleCloudPlatform/kubernetes/issues/2643))
1. Update preconditions based on the values of arbitrary object fields.

@@ -191,7 +191,7 @@ NAME IMAGE(S SELECTOR REPLICAS
Start a container running nginx with a replication controller and three replicas

```
$ cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80
$ cluster/kubectl.sh run my-nginx --image=nginx --replicas=3 --port=80
```

When listing the pods, you will see that three containers have been started and are in Waiting state:
@@ -243,10 +243,10 @@ myNginx nginx name=my-nginx 3

We did not start any services, hence there are none listed. But we see three replicas displayed properly.
Check the [guestbook](../../examples/guestbook/README.md) application to learn how to create a service.
You can already play with resizing the replicas with:
You can already play with scaling the replicas with:

```sh
$ ./cluster/kubectl.sh resize rc my-nginx --replicas=2
$ ./cluster/kubectl.sh scale rc my-nginx --replicas=2
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running

@@ -46,7 +46,7 @@ The `kubectl.sh` line below spins up two containers running
[Nginx](http://nginx.org/en/) running on port 80:

```bash
cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=2 --port=80
cluster/kubectl.sh run my-nginx --image=nginx --replicas=2 --port=80
```

To stop the containers:

@@ -88,7 +88,7 @@ redis-slave-controller-gziey 10.2.1.4 slave brendanburns/redis

## Scaling

Two single-core minions are certainly not enough for a production system of today, and, as you can see, there is one _unassigned_ pod. Let's resize the cluster by adding a couple of bigger nodes.
Two single-core minions are certainly not enough for a production system of today, and, as you can see, there is one _unassigned_ pod. Let's scale the cluster by adding a couple of bigger nodes.

You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/weave-demos/coreos-azure`).

@@ -96,9 +96,9 @@ First, lets set the size of new VMs:
```
export AZ_VM_SIZE=Large
```
Now, run resize script with state file of the previous deployment and number of minions to add:
Now, run scale script with state file of the previous deployment and number of minions to add:
```
./resize-kubernetes-cluster.js ./output/kubernetes_1c1496016083b4_deployment.yml 2
./scale-kubernetes-cluster.js ./output/kubernetes_1c1496016083b4_deployment.yml 2
...
azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kubernetes_8f984af944f572_ssh_conf <hostname>`
azure_wrapper/info: The hosts in this deployment are:

@@ -124,7 +124,7 @@ kube-03 environment=production Ready
kube-04 environment=production Ready
```

You can see that two more minions joined happily. Let's resize the number of Guestbook instances now.
You can see that two more minions joined happily. Let's scale the number of Guestbook instances now.

First, double-check how many replication controllers there are:

@@ -134,12 +134,12 @@ CONTROLLER CONTAINER(S) IMAGE(S)
frontend-controller php-redis kubernetes/example-guestbook-php-redis name=frontend 3
redis-slave-controller slave brendanburns/redis-slave name=redisslave 2
```
As there are 4 minions, let's resize proportionally:
As there are 4 minions, let's scale proportionally:
```
core@kube-00 ~ $ kubectl resize --replicas=4 rc redis-slave-controller
resized
core@kube-00 ~ $ kubectl resize --replicas=4 rc frontend-controller
resized
core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave-controller
scaled
core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend-controller
scaled
```
Check what you have now:
```
@@ -182,7 +182,7 @@ If you don't wish care about the Azure bill, you can tear down the cluster. It's
./destroy-cluster.js ./output/kubernetes_8f984af944f572_deployment.yml
```

> Note: make sure to use the _latest state file_, as after resizing there is a new one.
> Note: make sure to use the _latest state file_, as after scaling there is a new one.

By the way, with the scripts shown, you can deploy multiple clusters, if you like :)

@@ -18,7 +18,7 @@ If the status of any node is ```Unknown``` or ```NotReady``` your cluster is bro

### Run an application
```sh
kubectl -s http://localhost:8080 run-container nginx --image=nginx --port=80
kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
```

now run ```docker ps``` you should see nginx running. You may need to wait a few minutes for the image to get pulled.
@@ -31,7 +31,7 @@ kubectl expose rc nginx --port=80
This should print:
```
NAME LABELS SELECTOR IP PORT(S)
nginx <none> run-container=nginx <ip-addr> 80/TCP
nginx <none> run=nginx <ip-addr> 80/TCP
```

Hit the webserver:
@@ -46,7 +46,7 @@ Note that you will need run this curl command on your boot2docker VM if you are
Now try to scale up the nginx you created before:

```sh
kubectl resize rc nginx --replicas=3
kubectl scale rc nginx --replicas=3
```

And list the pods

@@ -51,7 +51,7 @@ If you are running different kubernetes clusters, you may need to specify ```-s

### Run an application
```sh
kubectl -s http://localhost:8080 run-container nginx --image=nginx --port=80
kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
```

now run ```docker ps``` you should see nginx running. You may need to wait a few minutes for the image to get pulled.
@@ -64,7 +64,7 @@ kubectl expose rc nginx --port=80
This should print:
```
NAME LABELS SELECTOR IP PORT(S)
nginx <none> run-container=nginx <ip-addr> 80/TCP
nginx <none> run=nginx <ip-addr> 80/TCP
```

Hit the webserver:

@@ -47,7 +47,7 @@ You can now use any of the cluster/kubectl.sh commands to interact with your loc
cluster/kubectl.sh get pods
cluster/kubectl.sh get services
cluster/kubectl.sh get replicationcontrollers
cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=2 --port=80
cluster/kubectl.sh run my-nginx --image=nginx --replicas=2 --port=80

## begin wait for provision to complete, you can monitor the docker pull by opening a new terminal
@@ -164,7 +164,7 @@ NAME IMAGE(S SELECTOR REPLICAS
Start a container running nginx with a replication controller and three replicas

```sh
$ ./cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80
$ ./cluster/kubectl.sh run my-nginx --image=nginx --replicas=3 --port=80
```

When listing the pods, you will see that three containers have been started and are in Waiting state:
@@ -219,10 +219,10 @@ myNginx nginx name=my-nginx 3

We did not start any services, hence there are none listed. But we see three replicas displayed properly.
Check the [guestbook](../../examples/guestbook/README.md) application to learn how to create a service.
You can already play with resizing the replicas with:
You can already play with scaling the replicas with:

```sh
$ ./cluster/kubectl.sh resize rc my-nginx --replicas=2
$ ./cluster/kubectl.sh scale rc my-nginx --replicas=2
$ ./cluster/kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
7813c8bd-3ffe-11e4-9036-0800279696e1 nginx 10.245.2.2/10.245.2.2 name=myNginx Running

@@ -58,9 +58,9 @@ kubectl
* [kubectl namespace](kubectl_namespace.md) - SUPERCEDED: Set and view the current Kubernetes namespace
* [kubectl port-forward](kubectl_port-forward.md) - Forward one or more local ports to a pod.
* [kubectl proxy](kubectl_proxy.md) - Run a proxy to the Kubernetes API server
* [kubectl resize](kubectl_resize.md) - Set a new size for a Replication Controller.
* [kubectl rolling-update](kubectl_rolling-update.md) - Perform a rolling update of the given ReplicationController.
* [kubectl run-container](kubectl_run-container.md) - Run a particular image on the cluster.
* [kubectl run](kubectl_run.md) - Run a particular image on the cluster.
* [kubectl scale](kubectl_scale.md) - Set a new size for a Replication Controller.
* [kubectl stop](kubectl_stop.md) - Gracefully shut down a resource by id or filename.
* [kubectl update](kubectl_update.md) - Update a resource by filename or stdin.
* [kubectl version](kubectl_version.md) - Print the client and server version information.

@@ -1,4 +1,4 @@
## kubectl run-container
## kubectl run

Run a particular image on the cluster.

@@ -9,34 +9,34 @@ Create and run a particular image, possibly replicated.
Creates a replication controller to manage the created container(s).

```
kubectl run-container NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]
kubectl run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]
```

### Examples

```
// Starts a single instance of nginx.
$ kubectl run-container nginx --image=nginx
$ kubectl run nginx --image=nginx

// Starts a replicated instance of nginx.
$ kubectl run-container nginx --image=nginx --replicas=5
$ kubectl run nginx --image=nginx --replicas=5

// Dry run. Print the corresponding API objects without creating them.
$ kubectl run-container nginx --image=nginx --dry-run
$ kubectl run nginx --image=nginx --dry-run

// Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
$ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'
$ kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'
```

### Options

```
--dry-run=false: If true, only print the object that would be sent, without sending it.
--generator="run-container/v1": The name of the API generator to use. Default is 'run-container-controller/v1'.
-h, --help=false: help for run-container
--generator="run/v1": The name of the API generator to use. Default is 'run-controller/v1'.
-h, --help=false: help for run
--hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container.
--image="": The image for the container to run.
-l, --labels="": Labels to apply to the pod(s) created by this call to run-container.
-l, --labels="": Labels to apply to the pod(s).
--no-headers=false: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|template|templatefile.
--output-version="": Output the formatted object with the given version (default api-version).
@@ -80,4 +80,4 @@ $ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta

###### Auto generated by spf13/cobra at 2015-05-21 10:33:11.189857293 +0000 UTC

[]()
[]()
@@ -1,4 +1,4 @@
## kubectl resize
## kubectl scale

Set a new size for a Replication Controller.

@@ -7,32 +7,32 @@ Set a new size for a Replication Controller.

Set a new size for a Replication Controller.

Resize also allows users to specify one or more preconditions for the resize action.
Scale also allows users to specify one or more preconditions for the scale action.
If --current-replicas or --resource-version is specified, it is validated before the
resize is attempted, and it is guaranteed that the precondition holds true when the
resize is sent to the server.
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.

```
kubectl resize [--resource-version=version] [--current-replicas=count] --replicas=COUNT RESOURCE ID
kubectl scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT RESOURCE ID
```

### Examples

```
// Resize replication controller named 'foo' to 3.
$ kubectl resize --replicas=3 replicationcontrollers foo
// Scale replication controller named 'foo' to 3.
$ kubectl scale --replicas=3 replicationcontrollers foo

// If the replication controller named foo's current size is 2, resize foo to 3.
$ kubectl resize --current-replicas=2 --replicas=3 replicationcontrollers foo
// If the replication controller named foo's current size is 2, scale foo to 3.
$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo
```

### Options

```
--current-replicas=-1: Precondition for current size. Requires that the current size of the replication controller match this value in order to resize.
-h, --help=false: help for resize
--current-replicas=-1: Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.
-h, --help=false: help for scale
--replicas=-1: The new desired number of replicas. Required.
--resource-version="": Precondition for resource version. Requires that the current resource version match this value in order to resize.
--resource-version="": Precondition for resource version. Requires that the current resource version match this value in order to scale.
```

### Options inherited from parent commands
@@ -69,4 +69,4 @@ $ kubectl resize --current-replicas=2 --replicas=3 replicationcontrollers foo

###### Auto generated by spf13/cobra at 2015-05-21 10:33:11.185268791 +0000 UTC

[]()
[]()
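Reviewer note: the `--current-replicas` / `--resource-version` flags documented above are preconditions validated before the scale request is sent. A small illustrative sketch of that check, using made-up types rather than the actual kubectl code:

```go
package main

import (
	"errors"
	"fmt"
)

// ScalePrecondition mirrors the --current-replicas / --resource-version flags
// described above; the type and its fields are illustrative, not the real kubectl types.
type ScalePrecondition struct {
	CurrentReplicas int    // -1 means "don't check"
	ResourceVersion string // "" means "don't check"
}

// Validate returns an error when the observed state does not match the precondition.
func (p ScalePrecondition) Validate(observedReplicas int, observedVersion string) error {
	if p.CurrentReplicas != -1 && observedReplicas != p.CurrentReplicas {
		return fmt.Errorf("expected %d replicas, found %d", p.CurrentReplicas, observedReplicas)
	}
	if p.ResourceVersion != "" && observedVersion != p.ResourceVersion {
		return errors.New("resource version mismatch")
	}
	return nil
}

func main() {
	pre := ScalePrecondition{CurrentReplicas: 2, ResourceVersion: ""}
	if err := pre.Validate(3, "12345"); err != nil {
		// Precondition does not hold, so no scale request is sent.
		fmt.Println("precondition failed, not scaling:", err)
		return
	}
	fmt.Println("precondition holds, sending scale request")
}
```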
|
@@ -8,7 +8,7 @@ Gracefully shut down a resource by id or filename.
|
||||
Gracefully shut down a resource by id or filename.
|
||||
|
||||
Attempts to shut down and delete a resource that supports graceful termination.
|
||||
If the resource is resizable it will be resized to 0 before deletion.
|
||||
If the resource is scalable it will be scaled to 0 before deletion.
|
||||
|
||||
```
|
||||
kubectl stop (-f FILENAME | RESOURCE (ID | -l label | --all))
|
||||
|
@@ -19,9 +19,9 @@ kubectl-logs.1
|
||||
kubectl-namespace.1
|
||||
kubectl-port-forward.1
|
||||
kubectl-proxy.1
|
||||
kubectl-resize.1
|
||||
kubectl-rolling-update.1
|
||||
kubectl-run-container.1
|
||||
kubectl-run.1
|
||||
kubectl-scale.1
|
||||
kubectl-stop.1
|
||||
kubectl-update.1
|
||||
kubectl-version.1
|
||||
|
@@ -3,12 +3,12 @@
|
||||
|
||||
.SH NAME
|
||||
.PP
|
||||
kubectl run\-container \- Run a particular image on the cluster.
|
||||
kubectl run \- Run a particular image on the cluster.
|
||||
|
||||
|
||||
.SH SYNOPSIS
|
||||
.PP
|
||||
\fBkubectl run\-container\fP [OPTIONS]
|
||||
\fBkubectl run\fP [OPTIONS]
|
||||
|
||||
|
||||
.SH DESCRIPTION
|
||||
@@ -23,12 +23,12 @@ Creates a replication controller to manage the created container(s).
|
||||
If true, only print the object that would be sent, without sending it.
|
||||
|
||||
.PP
|
||||
\fB\-\-generator\fP="run\-container/v1"
|
||||
The name of the API generator to use. Default is 'run\-container\-controller/v1'.
|
||||
\fB\-\-generator\fP="run/v1"
|
||||
The name of the API generator to use. Default is 'run\-controller/v1'.
|
||||
|
||||
.PP
|
||||
\fB\-h\fP, \fB\-\-help\fP=false
|
||||
help for run\-container
|
||||
help for run
|
||||
|
||||
.PP
|
||||
\fB\-\-hostport\fP=\-1
|
||||
@@ -40,7 +40,7 @@ Creates a replication controller to manage the created container(s).
|
||||
|
||||
.PP
|
||||
\fB\-l\fP, \fB\-\-labels\fP=""
|
||||
Labels to apply to the pod(s) created by this call to run\-container.
|
||||
Labels to apply to the pod(s).
|
||||
|
||||
.PP
|
||||
\fB\-\-no\-headers\fP=false
|
||||
@@ -176,16 +176,16 @@ Creates a replication controller to manage the created container(s).
|
||||
|
||||
.nf
|
||||
// Starts a single instance of nginx.
|
||||
$ kubectl run\-container nginx \-\-image=nginx
|
||||
$ kubectl run nginx \-\-image=nginx
|
||||
|
||||
// Starts a replicated instance of nginx.
|
||||
$ kubectl run\-container nginx \-\-image=nginx \-\-replicas=5
|
||||
$ kubectl run nginx \-\-image=nginx \-\-replicas=5
|
||||
|
||||
// Dry run. Print the corresponding API objects without creating them.
|
||||
$ kubectl run\-container nginx \-\-image=nginx \-\-dry\-run
|
||||
$ kubectl run nginx \-\-image=nginx \-\-dry\-run
|
||||
|
||||
// Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
|
||||
$ kubectl run\-container nginx \-\-image=nginx \-\-overrides='\{ "apiVersion": "v1beta3", "spec": \{ ... \} \}'
|
||||
$ kubectl run nginx \-\-image=nginx \-\-overrides='\{ "apiVersion": "v1beta3", "spec": \{ ... \} \}'
|
||||
|
||||
.fi
|
||||
.RE
|
@@ -3,12 +3,12 @@

.SH NAME
.PP
kubectl resize \- Set a new size for a Replication Controller.
kubectl scale \- Set a new size for a Replication Controller.

.SH SYNOPSIS
.PP
\fBkubectl resize\fP [OPTIONS]
\fBkubectl scale\fP [OPTIONS]

.SH DESCRIPTION
@@ -16,20 +16,20 @@ kubectl resize \- Set a new size for a Replication Controller.
Set a new size for a Replication Controller.

.PP
Resize also allows users to specify one or more preconditions for the resize action.
Scale also allows users to specify one or more preconditions for the scale action.
If \-\-current\-replicas or \-\-resource\-version is specified, it is validated before the
resize is attempted, and it is guaranteed that the precondition holds true when the
resize is sent to the server.
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.

.SH OPTIONS
.PP
\fB\-\-current\-replicas\fP=\-1
Precondition for current size. Requires that the current size of the replication controller match this value in order to resize.
Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.

.PP
\fB\-h\fP, \fB\-\-help\fP=false
help for resize
help for scale

.PP
\fB\-\-replicas\fP=\-1
@@ -37,7 +37,7 @@ resize is sent to the server.

.PP
\fB\-\-resource\-version\fP=""
Precondition for resource version. Requires that the current resource version match this value in order to resize.
Precondition for resource version. Requires that the current resource version match this value in order to scale.

.SH OPTIONS INHERITED FROM PARENT COMMANDS
@@ -143,11 +143,11 @@ resize is sent to the server.
.RS

.nf
// Resize replication controller named 'foo' to 3.
$ kubectl resize \-\-replicas=3 replicationcontrollers foo
// Scale replication controller named 'foo' to 3.
$ kubectl scale \-\-replicas=3 replicationcontrollers foo

// If the replication controller named foo's current size is 2, resize foo to 3.
$ kubectl resize \-\-current\-replicas=2 \-\-replicas=3 replicationcontrollers foo
// If the replication controller named foo's current size is 2, scale foo to 3.
$ kubectl scale \-\-current\-replicas=2 \-\-replicas=3 replicationcontrollers foo

.fi
.RE
@@ -17,7 +17,7 @@ Gracefully shut down a resource by id or filename.

.PP
Attempts to shut down and delete a resource that supports graceful termination.
If the resource is resizable it will be resized to 0 before deletion.
If the resource is scalable it will be scaled to 0 before deletion.

.SH OPTIONS

@@ -124,7 +124,7 @@ Find more information at

.SH SEE ALSO
.PP
\fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-update(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-logs(1)\fP, \fBkubectl\-rolling\-update(1)\fP, \fBkubectl\-resize(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-run\-container(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-cluster\-info(1)\fP, \fBkubectl\-api\-versions(1)\fP, \fBkubectl\-version(1)\fP,
\fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-update(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-logs(1)\fP, \fBkubectl\-rolling\-update(1)\fP, \fBkubectl\-scale(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-run(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-cluster\-info(1)\fP, \fBkubectl\-api\-versions(1)\fP, \fBkubectl\-version(1)\fP,

.SH HISTORY
@@ -12,7 +12,7 @@ done automatically based on statistical analysis and thresholds.

* Provide a concrete proposal for implementing auto-scaling pods within Kubernetes
* Implementation proposal should be in line with current discussions in existing issues:
* Resize verb - [1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629)
* Scale verb - [1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629)
* Config conflicts - [Config](https://github.com/GoogleCloudPlatform/kubernetes/blob/c7cb991987193d4ca33544137a5cb7d0292cf7df/docs/config.md#automated-re-configuration-processes)
* Rolling updates - [1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353)
* Multiple scalable types - [1624](https://github.com/GoogleCloudPlatform/kubernetes/issues/1624)
@@ -23,7 +23,7 @@ done automatically based on statistical analysis and thresholds.
* `ReplicationControllers` will not know about the auto-scaler, they are the target of the auto-scaler. The `ReplicationController` responsibilities are
constrained to only ensuring that the desired number of pods are operational per the [Replication Controller Design](http://docs.k8s.io/replication-controller.md#responsibilities-of-the-replication-controller)
* Auto-scalers will be loosely coupled with data gathering components in order to allow a wide variety of input sources
* Auto-scalable resources will support a resize verb ([1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629))
* Auto-scalable resources will support a scale verb ([1629](https://github.com/GoogleCloudPlatform/kubernetes/issues/1629))
such that the auto-scaler does not directly manipulate the underlying resource.
* Initially, most thresholds will be set by application administrators. It should be possible for an autoscaler to be
written later that sets thresholds automatically based on past behavior (CPU used vs incoming requests).
@@ -31,7 +31,7 @@ written later that sets thresholds automatically based on past behavior (CPU use
explicitly setting the replica count to 0 should mean that the auto-scaler does not try to scale the application up)
* It should be possible to write and deploy a custom auto-scaler without modifying existing auto-scalers
* Auto-scalers must be able to monitor multiple replication controllers while only targeting a single scalable
object (for now a ReplicationController, but in the future it could be a job or any resource that implements resize)
object (for now a ReplicationController, but in the future it could be a job or any resource that implements scale)

## Use Cases

@@ -68,13 +68,13 @@ In order to facilitate talking about auto-scaling the following definitions are
* `ReplicationController` - the first building block of auto scaling. Pods are deployed and scaled by a `ReplicationController`.
* kube proxy - The proxy handles internal inter-pod traffic, an example of a data source to drive an auto-scaler
* L3/L7 proxies - A routing layer handling outside to inside traffic requests, an example of a data source to drive an auto-scaler
* auto-scaler - scales replicas up and down by using the `resize` endpoint provided by scalable resources (`ReplicationController`)
* auto-scaler - scales replicas up and down by using the `scale` endpoint provided by scalable resources (`ReplicationController`)

### Auto-Scaler

The Auto-Scaler is a state reconciler responsible for checking data against configured scaling thresholds
and calling the `resize` endpoint to change the number of replicas. The scaler will
and calling the `scale` endpoint to change the number of replicas. The scaler will
use a client/cache implementation to receive watch data from the data aggregators and respond to them by
scaling the application. Auto-scalers are created and defined like other resources via REST endpoints and belong to the
namespace just as a `ReplicationController` or `Service`.
@@ -84,7 +84,7 @@ Since an auto-scaler is a durable object it is best represented as a resource.

```go
//The auto scaler interface
type AutoScalerInterface interface {
//ScaleApplication adjusts a resource's replica count. Calls resize endpoint.
//ScaleApplication adjusts a resource's replica count. Calls scale endpoint.
//Args to this are based on what the endpoint
//can support. See https://github.com/GoogleCloudPlatform/kubernetes/issues/1629
ScaleApplication(num int) error
@@ -118,8 +118,8 @@ Since an auto-scaler is a durable object it is best represented as a resource.
//0 means that the application is allowed to idle
MinAutoScaleCount int

//TargetSelector provides the resizeable target(s). Right now this is a ReplicationController
//in the future it could be a job or any resource that implements resize.
//TargetSelector provides the scalable target(s). Right now this is a ReplicationController
//in the future it could be a job or any resource that implements scale.
TargetSelector map[string]string

//MonitorSelector defines a set of capacity that the auto-scaler is monitoring
@@ -219,8 +219,8 @@ Of note: If the statistics gathering mechanisms can be initialized with a regist
potentially piggyback on this registry.

### Multi-target Scaling Policy
If multiple resizable targets satisfy the `TargetSelector` criteria the auto-scaler should be configurable as to which
target(s) are resized. To begin with, if multiple targets are found the auto-scaler will scale the largest target up
If multiple scalable targets satisfy the `TargetSelector` criteria the auto-scaler should be configurable as to which
target(s) are scaled. To begin with, if multiple targets are found the auto-scaler will scale the largest target up
or down as appropriate. In the future this may be more configurable.

### Interactions with a deployment
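Reviewer note: to make the renamed verb concrete, here is a minimal sketch of what an implementation of the proposal's `AutoScalerInterface` could look like. The client interface, method names, and fake client are illustrative stand-ins, not part of this commit or of the real Kubernetes client:

```go
package main

import "fmt"

// AutoScalerInterface is reproduced from the proposal above.
type AutoScalerInterface interface {
	ScaleApplication(num int) error
}

// replicaSetter is a hypothetical abstraction over the scale endpoint.
type replicaSetter interface {
	SetReplicas(name string, replicas int) error
}

// rcScaler targets a single ReplicationController selected via TargetSelector.
type rcScaler struct {
	client replicaSetter
	target string
}

func (s *rcScaler) ScaleApplication(num int) error {
	// Delegate to the scale endpoint rather than manipulating pods directly,
	// as the proposal requires.
	return s.client.SetReplicas(s.target, num)
}

// fakeClient lets the sketch run without a cluster.
type fakeClient struct{}

func (fakeClient) SetReplicas(name string, replicas int) error {
	fmt.Printf("scale %s to %d replicas\n", name, replicas)
	return nil
}

func main() {
	var scaler AutoScalerInterface = &rcScaler{client: fakeClient{}, target: "frontend"}
	_ = scaler.ScaleApplication(4)
}
```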
@@ -36,7 +36,7 @@ The replication controller simply ensures that the desired number of pods matche

The replication controller is forever constrained to this narrow responsibility. It itself will not perform readiness nor liveness probes. Rather than performing auto-scaling, it is intended to be controlled by an external auto-scaler (as discussed in [#492](https://github.com/GoogleCloudPlatform/kubernetes/issues/492)), which would change its `replicas` field. We will not add scheduling policies (e.g., [spreading](https://github.com/GoogleCloudPlatform/kubernetes/issues/367#issuecomment-48428019)) to replication controller. Nor should it verify that the pods controlled match the currently specified template, as that would obstruct auto-sizing and other automated processes. Similarly, completion deadlines, ordering dependencies, configuration expansion, and other features belong elsewhere. We even plan to factor out the mechanism for bulk pod creation ([#170](https://github.com/GoogleCloudPlatform/kubernetes/issues/170)).

The replication controller is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run-container, stop, resize, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing replication controllers, auto-scalers, services, scheduling policies, canaries, etc.
The replication controller is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, stop, scale, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing replication controllers, auto-scalers, services, scheduling policies, canaries, etc.

## Common usage patterns

@@ -52,7 +52,7 @@ Replication controller makes it easy to scale the number of replicas up or down,

Replication controller is designed to facilitate rolling updates to a service by replacing pods one-by-one.

As explained in [#1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353), the recommended approach is to create a new replication controller with 1 replica, resize the new (+1) and old (-1) controllers one by one, and then delete the old controller after it reaches 0 replicas. This predictably updates the set of pods regardless of unexpected failures.
As explained in [#1353](https://github.com/GoogleCloudPlatform/kubernetes/issues/1353), the recommended approach is to create a new replication controller with 1 replica, scale the new (+1) and old (-1) controllers one by one, and then delete the old controller after it reaches 0 replicas. This predictably updates the set of pods regardless of unexpected failures.

Ideally, the rolling update controller would take application readiness into account, and would ensure that a sufficient number of pods were productively serving at any given time.
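Reviewer note: the rolling-update recipe above (start the new controller at 1 replica, then scale new +1 / old -1 until the old controller reaches 0 and can be deleted) can be summarized in a short sketch. The controller names and the `scale` helper are hypothetical; the real command now spells the verb `kubectl scale`:

```go
package main

import "fmt"

// scale stands in for a call to the scale endpoint on a replication controller.
func scale(name string, replicas int) {
	fmt.Printf("kubectl scale rc %s --replicas=%d\n", name, replicas)
}

func main() {
	const desired = 3 // replicas the old controller currently runs

	// The new controller is created with 1 replica; then step the new one up
	// and the old one down until the old controller reaches 0.
	newReplicas, oldReplicas := 1, desired
	scale("frontend-v2", newReplicas)
	for oldReplicas > 0 {
		if newReplicas < desired {
			newReplicas++
			scale("frontend-v2", newReplicas)
		}
		oldReplicas--
		scale("frontend-v1", oldReplicas)
	}
	fmt.Println("kubectl delete rc frontend-v1")
}
```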
@@ -125,7 +125,7 @@ subsets:
You can see that the _Service_ has found the pod we created in step one.

### Adding replicated nodes
Of course, a single node cluster isn't particularly interesting. The real power of Kubernetes and Cassandra lies in easily building a replicated, resizable Cassandra cluster.
Of course, a single node cluster isn't particularly interesting. The real power of Kubernetes and Cassandra lies in easily building a replicated, scalable Cassandra cluster.

In Kubernetes a _Replication Controller_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.

@@ -185,9 +185,9 @@ $ kubectl create -f cassandra-controller.yaml

Now this is actually not that interesting, since we haven't actually done anything new. Now it will get interesting.

Let's resize our cluster to 2:
Let's scale our cluster to 2:
```sh
$ kubectl resize rc cassandra --replicas=2
$ kubectl scale rc cassandra --replicas=2
```

Now if you list the pods in your cluster, you should see two cassandra pods:
@@ -218,9 +218,9 @@ UN 10.244.0.5 74.09 KB 256 100.0% 86feda0f-f070-4a5b-bda1-2ee
UN 10.244.3.3 51.28 KB 256 100.0% dafe3154-1d67-42e1-ac1d-78e7e80dce2b rack1
```

Now let's resize our cluster to 4 nodes:
Now let's scale our cluster to 4 nodes:
```sh
$ kubectl resize rc cassandra --replicas=4
$ kubectl scale rc cassandra --replicas=4
```

Examining the status again:
@@ -251,13 +251,13 @@ kubectl create -f cassandra-service.yaml
kubectl create -f cassandra-controller.yaml

# scale up to 2 nodes
kubectl resize rc cassandra --replicas=2
kubectl scale rc cassandra --replicas=2

# validate the cluster
docker exec <container-id> nodetool status

# scale up to 4 nodes
kubectl resize rc cassandra --replicas=4
kubectl scale rc cassandra --replicas=4
```

### Seed Provider Source
@@ -235,8 +235,8 @@ $ curl 104.197.12.157:9200/_nodes?pretty=true
```
Let's ramp up the number of Elasticsearch nodes from 4 to 10:
```
$ kubectl resize --replicas=10 replicationcontrollers music-db --namespace=mytunes
resized
$ kubectl scale --replicas=10 replicationcontrollers music-db --namespace=mytunes
scaled
$ kubectl get pods --namespace=mytunes
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE
music-db-0fwsu 10.244.2.48 kubernetes-minion-m49b/104.197.35.221 name=music-db Running 33 minutes
@@ -52,7 +52,7 @@ $ kubectl create -f hazelcast-service.yaml
```

### Adding replicated nodes
The real power of Kubernetes and Hazelcast lies in easily building a replicated, resizable Hazelcast cluster.
The real power of Kubernetes and Hazelcast lies in easily building a replicated, scalable Hazelcast cluster.

In Kubernetes a _Replication Controller_ is responsible for replicating sets of identical pods. Like a _Service_ it has a selector query which identifies the members of its set. Unlike a _Service_ it also has a desired number of replicas, and it will create or delete _Pods_ to ensure that the number of _Pods_ matches up with its desired state.

@@ -129,9 +129,9 @@ You can see that the _Service_ has found the pod created by the replication cont

Now it gets even more interesting.

Let's resize our cluster to 2 pods:
Let's scale our cluster to 2 pods:
```sh
$ kubectl resize rc hazelcast --replicas=2
$ kubectl scale rc hazelcast --replicas=2
```

Now if you list the pods in your cluster, you should see two hazelcast pods:
@@ -175,9 +175,9 @@ Members [2] {
2015-05-09 22:06:31.177 INFO 5 --- [ main] com.hazelcast.core.LifecycleService : [10.244.66.2]:5701 [someGroup] [3.4.2] Address[10.244.66.2]:5701 is STARTED
```

Now let's resize our cluster to 4 nodes:
Now let's scale our cluster to 4 nodes:
```sh
$ kubectl resize rc hazelcast --replicas=4
$ kubectl scale rc hazelcast --replicas=4
```

Examine the status again by checking a node's log and you should see the 4 members connected.
@@ -193,10 +193,10 @@ kubectl create -f hazelcast-service.yaml
kubectl create -f hazelcast-controller.yaml

# scale up to 2 nodes
kubectl resize rc hazelcast --replicas=2
kubectl scale rc hazelcast --replicas=2

# scale up to 4 nodes
kubectl resize rc hazelcast --replicas=4
kubectl scale rc hazelcast --replicas=4
```

### Hazelcast Discovery Source
@@ -184,7 +184,7 @@ At this point, all requests we make to the Kubernetes cluster from the command l
Let's create some content.

```shell
$ cluster/kubectl.sh run-container snowflake --image=kubernetes/serve_hostname --replicas=2
$ cluster/kubectl.sh run snowflake --image=kubernetes/serve_hostname --replicas=2
```

We have just created a replication controller whose replica size is 2 that is running the pod called snowflake with a basic container that just serves the hostname.
@@ -192,13 +192,13 @@ We have just created a replication controller whose replica size is 2 that is ru
```shell
cluster/kubectl.sh get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
snowflake snowflake kubernetes/serve_hostname run-container=snowflake 2
snowflake snowflake kubernetes/serve_hostname run=snowflake 2

$ cluster/kubectl.sh get pods
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE
snowflake-mbrfi 10.244.2.4 kubernetes-minion-ilqx/104.197.8.214 run-container=snowflake Running About an hour
snowflake-mbrfi 10.244.2.4 kubernetes-minion-ilqx/104.197.8.214 run=snowflake Running About an hour
snowflake kubernetes/serve_hostname Running About an hour
snowflake-p78ev 10.244.2.5 kubernetes-minion-ilqx/104.197.8.214 run-container=snowflake Running About an hour
snowflake-p78ev 10.244.2.5 kubernetes-minion-ilqx/104.197.8.214 run=snowflake Running About an hour
snowflake kubernetes/serve_hostname Running About an hour
```

@@ -223,23 +223,23 @@ POD IP CONTAINER(S) IMAGE(S)
Production likes to run cattle, so let's create some cattle pods.

```shell
$ cluster/kubectl.sh run-container cattle --image=kubernetes/serve_hostname --replicas=5
$ cluster/kubectl.sh run cattle --image=kubernetes/serve_hostname --replicas=5

$ cluster/kubectl.sh get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
cattle cattle kubernetes/serve_hostname run-container=cattle 5
cattle cattle kubernetes/serve_hostname run=cattle 5

$ cluster/kubectl.sh get pods
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE
cattle-1kyvj 10.244.0.4 kubernetes-minion-7s1y/23.236.54.97 run-container=cattle Running About an hour
cattle-1kyvj 10.244.0.4 kubernetes-minion-7s1y/23.236.54.97 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-kobrk 10.244.1.4 kubernetes-minion-cfs6/104.154.61.231 run-container=cattle Running About an hour
cattle-kobrk 10.244.1.4 kubernetes-minion-cfs6/104.154.61.231 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-l1v9t 10.244.0.5 kubernetes-minion-7s1y/23.236.54.97 run-container=cattle Running About an hour
cattle-l1v9t 10.244.0.5 kubernetes-minion-7s1y/23.236.54.97 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-ne2sj 10.244.3.7 kubernetes-minion-x8gx/104.154.47.83 run-container=cattle Running About an hour
cattle-ne2sj 10.244.3.7 kubernetes-minion-x8gx/104.154.47.83 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-qrk4x 10.244.0.6 kubernetes-minion-7s1y/23.236.54.97 run-container=cattle Running About an hour
cattle-qrk4x 10.244.0.6 kubernetes-minion-7s1y/23.236.54.97 run=cattle Running About an hour
cattle kubernetes/serve_hostname
```

@@ -56,15 +56,15 @@ We create it as follows:
kubectl create -f examples/redis/redis-sentinel-controller.yaml
```

### Resize our replicated pods
### Scale our replicated pods
Initially creating those pods didn't actually do anything, since we only asked for one sentinel and one redis server, and they already existed, nothing changed. Now we will add more replicas:

```sh
kubectl resize rc redis --replicas=3
kubectl scale rc redis --replicas=3
```

```sh
kubectl resize rc redis-sentinel --replicas=3
kubectl scale rc redis-sentinel --replicas=3
```

This will create two additional replicas of the redis server and two additional replicas of the redis sentinel.
@@ -86,7 +86,7 @@ Now let's take a close look at what happens after this pod is deleted. There ar
3. The redis sentinels themselves, realize that the master has disappeared from the cluster, and begin the election procedure for selecting a new master. They perform this election and selection, and chose one of the existing redis server replicas to be the new master.

### Conclusion
At this point we now have a reliable, scalable Redis installation. By resizing the replication controller for redis servers, we can increase or decrease the number of read-slaves in our cluster. Likewise, if failures occur, the redis-sentinels will perform master election and select a new master.
At this point we now have a reliable, scalable Redis installation. By scaling the replication controller for redis servers, we can increase or decrease the number of read-slaves in our cluster. Likewise, if failures occur, the redis-sentinels will perform master election and select a new master.

### tl; dr
For those of you who are impatient, here is the summary of commands we ran in this tutorial
@@ -104,9 +104,9 @@ kubectl create -f examples/redis/redis-controller.yaml
# Create a replication controller for redis sentinels
kubectl create -f examples/redis/redis-sentinel-controller.yaml

# Resize both replication controllers
kubectl resize rc redis --replicas=3
kubectl resize rc redis-sentinel --replicas=3
# Scale both replication controllers
kubectl scale rc redis --replicas=3
kubectl scale rc redis-sentinel --replicas=3

# Delete the original master pod
kubectl delete pods redis-master
@@ -61,12 +61,12 @@ rethinkdb-rc-1.16.0-6odi0
Scale
-----

You can scale up your cluster using `kubectl resize`, and the new pod will join the existing cluster automatically, for example
You can scale up your cluster using `kubectl scale`, and the new pod will join the existing cluster automatically, for example

```shell
$kubectl resize rc rethinkdb-rc-1.16.0 --replicas=3
resized
$kubectl scale rc rethinkdb-rc-1.16.0 --replicas=3
scaled
$kubectl get po
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE
rethinkdb-rc-1.16.0-6odi0 10.244.3.3 kubernetes-minion-s59e/104.197.79.42 db=rethinkdb,role=replicas Running About a minute
|
@@ -12,7 +12,7 @@ The `kubectl` line below spins up two containers running
|
||||
[Nginx](http://nginx.org/en/) running on port 80:
|
||||
|
||||
```bash
|
||||
kubectl run-container my-nginx --image=nginx --replicas=2 --port=80
|
||||
kubectl run my-nginx --image=nginx --replicas=2 --port=80
|
||||
```
|
||||
|
||||
Once the pods are created, you can list them to see what is up and running:
|
||||
|
@@ -47,12 +47,12 @@ $ ./cluster/kubectl.sh create -f examples/update-demo/nautilus-rc.yaml

After pulling the image from the Docker Hub to your worker nodes (which may take a minute or so) you'll see a couple of squares in the UI detailing the pods that are running along with the image that they are serving up. A cute little nautilus.

### Step Three: Try resizing the controller
### Step Three: Try scaling the controller

Now we will increase the number of replicas from two to four:

```bash
$ ./cluster/kubectl.sh resize rc update-demo-nautilus --replicas=4
$ ./cluster/kubectl.sh scale rc update-demo-nautilus --replicas=4
```

If you go back to the [demo website](http://localhost:8001/static/index.html) you should eventually see four boxes, one for each pod.
@@ -66,7 +66,7 @@ $ ./cluster/kubectl.sh rolling-update update-demo-nautilus --update-period=10s -
The rolling-update command in kubectl will do 2 things:

1. Create a new replication controller with a pod template that uses the new image (`gcr.io/google_containers/update-demo:kitten`)
2. Resize the old and new replication controllers until the new controller replaces the old. This will kill the current pods one at a time, spinning up new ones to replace them.
2. Scale the old and new replication controllers until the new controller replaces the old. This will kill the current pods one at a time, spinning up new ones to replace them.

Watch the [demo website](http://localhost:8001/static/index.html), it will update one pod every 10 seconds until all of the pods have the new image.
@@ -499,27 +499,27 @@ __EOF__
# Describe command should print detailed information
kube::test::describe_object_assert rc 'frontend' "Name:" "Image(s):" "Labels:" "Selector:" "Replicas:" "Pods Status:"

### Resize replication controller frontend with current-replicas and replicas
### Scale replication controller frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
# Command
kubectl resize --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

### Resize replication controller frontend with (wrong) current-replicas and replicas
### Scale replication controller frontend with (wrong) current-replicas and replicas
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
! kubectl resize --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'

### Resize replication controller frontend with replicas only
### Scale replication controller frontend with replicas only
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
kubectl resize --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
kubectl scale --replicas=3 replicationcontrollers frontend "${kube_flags[@]}"
# Post-condition: 3 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'

@@ -138,7 +138,7 @@ func NewReplicationManager(kubeClient client.Interface, burstReplicas int) *Repl
rm.enqueueController(cur)
},
// This will enter the sync loop and no-op, becuase the controller has been deleted from the store.
// Note that deleting a controller immediately after resizing it to 0 will not work. The recommended
// Note that deleting a controller immediately after scaling it to 0 will not work. The recommended
// way of achieving this is by performing a `stop` operation on the controller.
DeleteFunc: rm.enqueueController,
},
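Reviewer note: the comment above points at the `stop` behaviour (scale the controller down to 0, then delete it). A tiny illustrative sketch of that ordering, with stand-in helpers rather than the real client calls:

```go
package main

import "fmt"

// Hypothetical helpers standing in for client calls; shown only to
// illustrate the scale-to-0-then-delete ordering.
func scaleRC(name string, replicas int) { fmt.Printf("scale rc %s to %d\n", name, replicas) }
func waitForPodsGone(name string)       { fmt.Printf("wait until pods of %s are gone\n", name) }
func deleteRC(name string)              { fmt.Printf("delete rc %s\n", name) }

// stopRC mirrors the ordering the comment above describes: scale the
// controller down to 0, wait, and only then delete it.
func stopRC(name string) {
	scaleRC(name, 0)
	waitForPodsGone(name)
	deleteRC(name)
}

func main() {
	stopRC("frontend")
}
```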
@@ -120,13 +120,13 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
cmds.AddCommand(NewCmdNamespace(out))
cmds.AddCommand(NewCmdLog(f, out))
cmds.AddCommand(NewCmdRollingUpdate(f, out))
cmds.AddCommand(NewCmdResize(f, out))
cmds.AddCommand(NewCmdScale(f, out))

cmds.AddCommand(NewCmdExec(f, in, out, err))
cmds.AddCommand(NewCmdPortForward(f))
cmds.AddCommand(NewCmdProxy(f, out))

cmds.AddCommand(NewCmdRunContainer(f, out))
cmds.AddCommand(NewCmdRun(f, out))
cmds.AddCommand(NewCmdStop(f, out))
cmds.AddCommand(NewCmdExposeService(f, out))

@@ -173,8 +173,8 @@ func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) {
Validator: validation.NullSchema{},
}
generators := map[string]kubectl.Generator{
"run-container/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
"run/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
}
return &cmdutil.Factory{
Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
@@ -245,7 +245,7 @@ func ExamplePrintReplicationController() {
Codec: codec,
Client: nil,
}
cmd := NewCmdRunContainer(f, os.Stdout)
cmd := NewCmdRun(f, os.Stdout)
ctrl := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",

@@ -227,7 +227,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
filename, oldName)
}
// TODO: handle resizes during rolling update
// TODO: handle scales during rolling update
if newRc.Spec.Replicas == 0 {
newRc.Spec.Replicas = oldRc.Spec.Replicas
}

@@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"io"
"os"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
@@ -30,31 +31,33 @@ const (
|
||||
run_long = `Create and run a particular image, possibly replicated.
|
||||
Creates a replication controller to manage the created container(s).`
|
||||
run_example = `// Starts a single instance of nginx.
|
||||
$ kubectl run-container nginx --image=nginx
|
||||
$ kubectl run nginx --image=nginx
|
||||
|
||||
// Starts a replicated instance of nginx.
|
||||
$ kubectl run-container nginx --image=nginx --replicas=5
|
||||
$ kubectl run nginx --image=nginx --replicas=5
|
||||
|
||||
// Dry run. Print the corresponding API objects without creating them.
|
||||
$ kubectl run-container nginx --image=nginx --dry-run
|
||||
$ kubectl run nginx --image=nginx --dry-run
|
||||
|
||||
// Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
|
||||
$ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'`
|
||||
$ kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'`
|
||||
)
|
||||
|
||||
func NewCmdRunContainer(f *cmdutil.Factory, out io.Writer) *cobra.Command {
|
||||
func NewCmdRun(f *cmdutil.Factory, out io.Writer) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "run-container NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]",
|
||||
Use: "run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]",
|
||||
// run-container is deprecated
|
||||
Aliases: []string{"run-container"},
|
||||
Short: "Run a particular image on the cluster.",
|
||||
Long: run_long,
|
||||
Example: run_example,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := RunRunContainer(f, out, cmd, args)
|
||||
err := Run(f, out, cmd, args)
|
||||
cmdutil.CheckErr(err)
|
||||
},
|
||||
}
|
||||
cmdutil.AddPrinterFlags(cmd)
|
||||
cmd.Flags().String("generator", "run-container/v1", "The name of the API generator to use. Default is 'run-container-controller/v1'.")
|
||||
cmd.Flags().String("generator", "run/v1", "The name of the API generator to use. Default is 'run-controller/v1'.")
|
||||
cmd.Flags().String("image", "", "The image for the container to run.")
|
||||
cmd.MarkFlagRequired("image")
|
||||
cmd.Flags().IntP("replicas", "r", 1, "Number of replicas to create for this container. Default is 1.")
|
||||
@@ -62,13 +65,17 @@ func NewCmdRunContainer(f *cmdutil.Factory, out io.Writer) *cobra.Command {
|
||||
cmd.Flags().String("overrides", "", "An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.")
|
||||
cmd.Flags().Int("port", -1, "The port that this container exposes.")
|
||||
cmd.Flags().Int("hostport", -1, "The host port mapping for the container port. To demonstrate a single-machine container.")
|
||||
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s) created by this call to run-container.")
|
||||
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s).")
|
||||
return cmd
|
||||
}
|
||||
|
||||
func RunRunContainer(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
|
||||
func Run(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
|
||||
if os.Args[1] == "run-container" {
|
||||
printDeprecationWarning("run", "run-container")
|
||||
}
|
||||
|
||||
if len(args) != 1 {
|
||||
return cmdutil.UsageError(cmd, "NAME is required for run-container")
|
||||
return cmdutil.UsageError(cmd, "NAME is required for run")
|
||||
}
|
||||
|
||||
namespace, err := f.DefaultNamespace()
|
||||
|
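The two hunks above carry the whole deprecation strategy for the rename: the command is registered under its new name, the old name survives as a cobra alias, and a warning is printed when the old spelling is used. A self-contained sketch of that pattern, using only cobra and the standard library (the warning helper below is a local stand-in for kubectl's unexported printDeprecationWarning, and its message text is assumed, not taken from this diff):

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// deprecationWarning is a local stand-in for kubectl's unexported
// printDeprecationWarning helper; it only writes a notice to stderr.
func deprecationWarning(command, alias string) {
	fmt.Fprintf(os.Stderr, "%s is DEPRECATED, please use %s instead.\n", alias, command)
}

func main() {
	root := &cobra.Command{Use: "kubectl-sketch"}

	run := &cobra.Command{
		Use: "run NAME --image=image",
		// The old name stays routable as an alias so existing scripts keep working.
		Aliases: []string{"run-container"},
		Short:   "Run a particular image on the cluster (sketch).",
		Run: func(cmd *cobra.Command, args []string) {
			// Mirror the diff's approach: detect which spelling was typed.
			if len(os.Args) > 1 && os.Args[1] == "run-container" {
				deprecationWarning("run", "run-container")
			}
			fmt.Println("would create a replication controller for", args)
		},
	}
	run.Flags().String("image", "", "The image for the container to run.")

	root.AddCommand(run)
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}

Invoking the sketch as "run-container nginx --image=nginx" hits the same code path as "run nginx --image=nginx", differing only in the warning.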
@@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"io"
"os"

"github.com/spf13/cobra"

@@ -28,38 +29,46 @@ import (
)

const (
resize_long = `Set a new size for a Replication Controller.
scale_long = `Set a new size for a Replication Controller.

Resize also allows users to specify one or more preconditions for the resize action.
Scale also allows users to specify one or more preconditions for the scale action.
If --current-replicas or --resource-version is specified, it is validated before the
resize is attempted, and it is guaranteed that the precondition holds true when the
resize is sent to the server.`
resize_example = `// Resize replication controller named 'foo' to 3.
$ kubectl resize --replicas=3 replicationcontrollers foo
scale is attempted, and it is guaranteed that the precondition holds true when the
scale is sent to the server.`
scale_example = `// Scale replication controller named 'foo' to 3.
$ kubectl scale --replicas=3 replicationcontrollers foo

// If the replication controller named foo's current size is 2, resize foo to 3.
$ kubectl resize --current-replicas=2 --replicas=3 replicationcontrollers foo`
// If the replication controller named foo's current size is 2, scale foo to 3.
$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo`
)

func NewCmdResize(f *cmdutil.Factory, out io.Writer) *cobra.Command {
// NewCmdScale returns a cobra command with the appropriate configuration and flags to run scale
func NewCmdScale(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "resize [--resource-version=version] [--current-replicas=count] --replicas=COUNT RESOURCE ID",
Use: "scale [--resource-version=version] [--current-replicas=count] --replicas=COUNT RESOURCE ID",
// resize is deprecated
Aliases: []string{"resize"},
Short: "Set a new size for a Replication Controller.",
Long: resize_long,
Example: resize_example,
Long: scale_long,
Example: scale_example,
Run: func(cmd *cobra.Command, args []string) {
err := RunResize(f, out, cmd, args)
err := RunScale(f, out, cmd, args)
cmdutil.CheckErr(err)
},
}
cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to resize.")
cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to resize.")
cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to scale.")
cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.")
cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.")
cmd.MarkFlagRequired("replicas")
return cmd
}

func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
// RunScale executes the scaling
func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
if os.Args[1] == "resize" {
printDeprecationWarning("scale", "resize")
}

count := cmdutil.GetFlagInt(cmd, "replicas")
if count < 0 {
return cmdutil.UsageError(cmd, "--replicas=COUNT RESOURCE ID")
@@ -95,19 +104,19 @@ func RunResize(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []str
}
info := infos[0]

resizer, err := f.Resizer(mapping)
scaler, err := f.Scaler(mapping)
if err != nil {
return err
}

resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
precondition := &kubectl.ResizePrecondition{currentSize, resourceVersion}
precondition := &kubectl.ScalePrecondition{currentSize, resourceVersion}
retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
waitForReplicas := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
if err := resizer.Resize(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
return err
}
fmt.Fprint(out, "resized\n")
fmt.Fprint(out, "scaled\n")
return nil
}
@@ -30,7 +30,7 @@ const (
stop_long = `Gracefully shut down a resource by id or filename.

Attempts to shut down and delete a resource that supports graceful termination.
If the resource is resizable it will be resized to 0 before deletion.`
If the resource is scalable it will be scaled to 0 before deletion.`
stop_example = `// Shut down foo.
$ kubectl stop replicationcontroller foo

@@ -65,8 +65,8 @@ type Factory struct {
Describer func(mapping *meta.RESTMapping) (kubectl.Describer, error)
// Returns a Printer for formatting objects of the given type or an error.
Printer func(mapping *meta.RESTMapping, noHeaders, withNamespace bool) (kubectl.ResourcePrinter, error)
// Returns a Resizer for changing the size of the specified RESTMapping type or an error
Resizer func(mapping *meta.RESTMapping) (kubectl.Resizer, error)
// Returns a Scaler for changing the size of the specified RESTMapping type or an error
Scaler func(mapping *meta.RESTMapping) (kubectl.Scaler, error)
// Returns a Reaper for gracefully shutting down resources.
Reaper func(mapping *meta.RESTMapping) (kubectl.Reaper, error)
// PodSelectorForObject returns the pod selector associated with the provided object
@@ -93,8 +93,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags

generators := map[string]kubectl.Generator{
"run-container/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
"run/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
}

clientConfig := optionalClientConfig
@@ -187,12 +187,12 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
LabelsForObject: func(object runtime.Object) (map[string]string, error) {
return meta.NewAccessor().Labels(object)
},
Resizer: func(mapping *meta.RESTMapping) (kubectl.Resizer, error) {
Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
client, err := clients.ClientForVersion(mapping.APIVersion)
if err != nil {
return nil, err
}
return kubectl.ResizerFor(mapping.Kind, kubectl.NewResizerClient(client))
return kubectl.ScalerFor(mapping.Kind, kubectl.NewScalerClient(client))
},
Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
client, err := clients.ClientForVersion(mapping.APIVersion)
@@ -297,7 +297,7 @@ func FindSourceController(r RollingUpdaterClient, namespace, name string) (*api.
}

// Update all pods for a ReplicationController (oldRc) by creating a new
// controller (newRc) with 0 replicas, and synchronously resizing oldRc,newRc
// controller (newRc) with 0 replicas, and synchronously scaling oldRc,newRc
// by 1 until oldRc has 0 replicas and newRc has the original # of desired
// replicas. Cleanup occurs based on a RollingUpdaterCleanupPolicy.
//
@@ -364,12 +364,12 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
oldName, oldRc.Spec.Replicas,
newName, newRc.Spec.Replicas)

newRc, err = r.resizeAndWait(newRc, retry, waitForReplicas)
newRc, err = r.scaleAndWait(newRc, retry, waitForReplicas)
if err != nil {
return err
}
time.Sleep(updatePeriod)
oldRc, err = r.resizeAndWait(oldRc, retry, waitForReplicas)
oldRc, err = r.scaleAndWait(oldRc, retry, waitForReplicas)
if err != nil {
return err
}
@@ -382,18 +382,18 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
fmt.Fprintf(out, "Stopping %s replicas: %d -> %d\n",
oldName, oldRc.Spec.Replicas, 0)
oldRc.Spec.Replicas = 0
oldRc, err = r.resizeAndWait(oldRc, retry, waitForReplicas)
// oldRc, err = r.resizeAndWait(oldRc, interval, timeout)
oldRc, err = r.scaleAndWait(oldRc, retry, waitForReplicas)
// oldRc, err = r.scaleAndWait(oldRc, interval, timeout)
if err != nil {
return err
}
}
// add remaining replicas on newRc
if newRc.Spec.Replicas != desired {
fmt.Fprintf(out, "Resizing %s replicas: %d -> %d\n",
fmt.Fprintf(out, "Scaling %s replicas: %d -> %d\n",
newName, newRc.Spec.Replicas, desired)
newRc.Spec.Replicas = desired
newRc, err = r.resizeAndWait(newRc, retry, waitForReplicas)
newRc, err = r.scaleAndWait(newRc, retry, waitForReplicas)
if err != nil {
return err
}
@@ -444,12 +444,12 @@ func (r *RollingUpdater) getExistingNewRc(sourceId, name string) (rc *api.Replic
return
}

func (r *RollingUpdater) resizeAndWait(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
resizer, err := ResizerFor("ReplicationController", r.c)
func (r *RollingUpdater) scaleAndWait(rc *api.ReplicationController, retry *RetryParams, wait *RetryParams) (*api.ReplicationController, error) {
scaler, err := ScalerFor("ReplicationController", r.c)
if err != nil {
return nil, err
}
if err := resizer.Resize(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ResizePrecondition{-1, ""}, retry, wait); err != nil {
if err := scaler.Scale(rc.Namespace, rc.Name, uint(rc.Spec.Replicas), &ScalePrecondition{-1, ""}, retry, wait); err != nil {
return nil, err
}
return r.c.GetReplicationController(r.ns, rc.ObjectMeta.Name)
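These rollingupdater.go hunks rename resizeAndWait to scaleAndWait but leave the algorithm intact: grow newRc by one, wait, shrink oldRc by one, wait, and once oldRc reaches zero jump newRc to the desired count. A dependency-free sketch of that sizing loop (a simulation only, with scaleAndWait reduced to a print; the real method calls Scaler.Scale and re-reads the controller from the API server):

package main

import "fmt"

// scaleAndWait stands in for RollingUpdater.scaleAndWait: in the real code it
// calls Scaler.Scale and then waits for the controller to report the new size.
func scaleAndWait(name string, replicas int) int {
	fmt.Printf("Scaling %s replicas to %d\n", name, replicas)
	return replicas
}

func main() {
	oldReplicas, newReplicas, desired := 2, 0, 7

	// Step the two controllers in opposite directions, one replica at a time,
	// until the old controller is drained.
	for oldReplicas > 0 {
		newReplicas = scaleAndWait("foo-v2", newReplicas+1)
		oldReplicas = scaleAndWait("foo-v1", oldReplicas-1)
	}

	// Add remaining replicas on newRc, mirroring the branch in the hunk above.
	if newReplicas != desired {
		newReplicas = scaleAndWait("foo-v2", desired)
	}
	fmt.Printf("done: foo-v1=%d foo-v2=%d\n", oldReplicas, newReplicas)
}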
@@ -143,7 +143,7 @@ func TestUpdate(t *testing.T) {
[]fakeResponse{
// no existing newRc
{nil, fmt.Errorf("not found")},
// 3 gets for each resize
// 3 gets for each scale
{newRc(1, 1), nil},
{newRc(1, 1), nil},
{newRc(1, 1), nil},
@@ -165,7 +165,7 @@ Update succeeded. Deleting foo-v1
[]fakeResponse{
// no existing newRc
{nil, fmt.Errorf("not found")},
// 3 gets for each resize
// 3 gets for each scale
{newRc(1, 2), nil},
{newRc(1, 2), nil},
{newRc(1, 2), nil},
@@ -196,7 +196,7 @@ Update succeeded. Deleting foo-v1
[]fakeResponse{
// no existing newRc
{nil, fmt.Errorf("not found")},
// 3 gets for each resize
// 3 gets for each scale
{newRc(1, 2), nil},
{newRc(1, 2), nil},
{newRc(1, 2), nil},
@@ -211,7 +211,7 @@ Update succeeded. Deleting foo-v1
{oldRc(0), nil},
{oldRc(0), nil},
{oldRc(0), nil},
// final resize on newRc
// final scale on newRc
{newRc(7, 7), nil},
{newRc(7, 7), nil},
{newRc(7, 7), nil},
@@ -222,7 +222,7 @@ Update succeeded. Deleting foo-v1
`Creating foo-v2
Updating foo-v1 replicas: 1, foo-v2 replicas: 1
Updating foo-v1 replicas: 0, foo-v2 replicas: 2
Resizing foo-v2 replicas: 2 -> 7
Scaling foo-v2 replicas: 2 -> 7
Update succeeded. Deleting foo-v1
`,
}, {
@@ -298,7 +298,7 @@ Update succeeded. Deleting foo-v1
responses := []fakeResponse{
// Existing newRc
{rcExisting, nil},
// 3 gets for each resize
// 3 gets for each scale
{newRc(2, 2), nil},
{newRc(2, 2), nil},
{newRc(2, 2), nil},
@@ -49,7 +49,7 @@ func (BasicReplicationController) Generate(params map[string]string) (runtime.Ob
}
} else {
labels = map[string]string{
"run-container": params["name"],
"run": params["name"],
}
}
count, err := strconv.Atoi(params["replicas"])
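The generator hunk above changes the default label key from "run-container" to "run"; that key is stamped on the controller's labels, its selector, and the pod template, and all three must agree or the controller would not select its own pods (the test hunks that follow assert exactly this). A standalone illustration of that invariant using plain maps (a hypothetical helper, not the real label/selector machinery):

package main

import "fmt"

// matches reports whether every key/value in selector is present in labels,
// which is how a replication controller claims its pods.
func matches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	name := "nginx"

	// Default labels produced when the user passes no --labels, as in the
	// generator hunk above: the key is now "run" instead of "run-container".
	selector := map[string]string{"run": name}   // RC selector
	podLabels := map[string]string{"run": name}  // pod template labels

	fmt.Println("selector matches pods:", matches(selector, podLabels))
}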
@@ -39,14 +39,14 @@ func TestGenerate(t *testing.T) {
expected: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: map[string]string{"run-container": "foo"},
Selector: map[string]string{"run": "foo"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -70,14 +70,14 @@ func TestGenerate(t *testing.T) {
expected: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: map[string]string{"run-container": "foo"},
Selector: map[string]string{"run": "foo"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -107,14 +107,14 @@ func TestGenerate(t *testing.T) {
expected: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: map[string]string{"run-container": "foo"},
Selector: map[string]string{"run": "foo"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -26,17 +26,17 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
)

// ResizePrecondition describes a condition that must be true for the resize to take place
// ScalePrecondition describes a condition that must be true for the scale to take place
// If CurrentSize == -1, it is ignored.
// If CurrentResourceVersion is the empty string, it is ignored.
// Otherwise they must equal the values in the replication controller for it to be valid.
type ResizePrecondition struct {
type ScalePrecondition struct {
Size int
ResourceVersion string
}

// A PreconditionError is returned when a replication controller fails to match
// the resize preconditions passed to kubectl.
// the scale preconditions passed to kubectl.
type PreconditionError struct {
Precondition string
ExpectedValue string
@@ -47,29 +47,29 @@ func (pe PreconditionError) Error() string {
return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue)
}

type ControllerResizeErrorType int
type ControllerScaleErrorType int

const (
ControllerResizeGetFailure ControllerResizeErrorType = iota
ControllerResizeUpdateFailure
ControllerScaleGetFailure ControllerScaleErrorType = iota
ControllerScaleUpdateFailure
)

// A ControllerResizeError is returned when a the resize request passes
// preconditions but fails to actually resize the controller.
type ControllerResizeError struct {
FailureType ControllerResizeErrorType
// A ControllerScaleError is returned when a scale request passes
// preconditions but fails to actually scale the controller.
type ControllerScaleError struct {
FailureType ControllerScaleErrorType
ResourceVersion string
ActualError error
}

func (c ControllerResizeError) Error() string {
func (c ControllerScaleError) Error() string {
return fmt.Sprintf(
"Resizing the controller failed with: %s; Current resource version %s",
"Scaling the controller failed with: %s; Current resource version %s",
c.ActualError, c.ResourceVersion)
}

// Validate ensures that the preconditions match. Returns nil if they are valid, an error otherwise
func (precondition *ResizePrecondition) Validate(controller *api.ReplicationController) error {
func (precondition *ScalePrecondition) Validate(controller *api.ReplicationController) error {
if precondition.Size != -1 && controller.Spec.Replicas != precondition.Size {
return PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(controller.Spec.Replicas)}
}
@@ -79,29 +79,29 @@ func (precondition *ResizePrecondition) Validate(controller *api.ReplicationCont
return nil
}

type Resizer interface {
// Resize resizes the named resource after checking preconditions. It optionally
type Scaler interface {
// Scale scales the named resource after checking preconditions. It optionally
// retries in the event of resource version mismatch (if retry is not nil),
// and optionally waits until the status of the resource matches newSize (if wait is not nil)
Resize(namespace, name string, newSize uint, preconditions *ResizePrecondition, retry, wait *RetryParams) error
// ResizeSimple does a simple one-shot attempt at resizing - not useful on it's own, but
// a necessary building block for Resize
ResizeSimple(namespace, name string, preconditions *ResizePrecondition, newSize uint) (string, error)
Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, wait *RetryParams) error
// ScaleSimple does a simple one-shot attempt at scaling - not useful on it's own, but
// a necessary building block for Scale
ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error)
}

func ResizerFor(kind string, c ResizerClient) (Resizer, error) {
func ScalerFor(kind string, c ScalerClient) (Scaler, error) {
switch kind {
case "ReplicationController":
return &ReplicationControllerResizer{c}, nil
return &ReplicationControllerScaler{c}, nil
}
return nil, fmt.Errorf("no resizer has been implemented for %q", kind)
return nil, fmt.Errorf("no scaler has been implemented for %q", kind)
}

type ReplicationControllerResizer struct {
c ResizerClient
type ReplicationControllerScaler struct {
c ScalerClient
}

// RetryParams encapsulates the retry parameters used by kubectl's resizer.
// RetryParams encapsulates the retry parameters used by kubectl's scaler.
type RetryParams struct {
Interval, Timeout time.Duration
}
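Taken together, the renamed pieces above are consumed the same way by every caller touched later in this PR (the Reaper, the rolling updater, and the e2e helpers). A sketch of that typical call sequence against the signatures shown in these hunks; the import paths are assumed from this source tree and the 10s/1m retry values are arbitrary, so treat it as illustrative rather than canonical:

package example

import (
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
)

// scaleRC scales one replication controller using the renamed API:
// ScalerFor + NewScalerClient + Scale, with an optional size precondition.
func scaleRC(c client.Interface, namespace, name string, replicas uint, currentSize int) error {
	scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(c))
	if err != nil {
		return err
	}

	// -1 / "" mean "don't check", exactly as documented for ScalePrecondition above.
	precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: ""}

	retry := kubectl.NewRetryParams(10*time.Second, 1*time.Minute)
	waitForReplicas := kubectl.NewRetryParams(10*time.Second, 1*time.Minute)

	return scaler.Scale(namespace, name, replicas, precondition, retry, waitForReplicas)
}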
@@ -110,15 +110,15 @@ func NewRetryParams(interval, timeout time.Duration) *RetryParams {
return &RetryParams{interval, timeout}
}

// ResizeCondition is a closure around Resize that facilitates retries via util.wait
func ResizeCondition(r Resizer, precondition *ResizePrecondition, namespace, name string, count uint) wait.ConditionFunc {
// ScaleCondition is a closure around Scale that facilitates retries via util.wait
func ScaleCondition(r Scaler, precondition *ScalePrecondition, namespace, name string, count uint) wait.ConditionFunc {
return func() (bool, error) {
_, err := r.ResizeSimple(namespace, name, precondition, count)
switch e, _ := err.(ControllerResizeError); err.(type) {
_, err := r.ScaleSimple(namespace, name, precondition, count)
switch e, _ := err.(ControllerScaleError); err.(type) {
case nil:
return true, nil
case ControllerResizeError:
if e.FailureType == ControllerResizeUpdateFailure {
case ControllerScaleError:
if e.FailureType == ControllerScaleUpdateFailure {
return false, nil
}
}
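ScaleCondition packages one ScaleSimple attempt as a wait.ConditionFunc: returning (true, nil) stops the polling, (false, nil) asks for another attempt (the update-failure case above), and any other error aborts. A dependency-free sketch of those polling semantics, with a local poll helper standing in for the util/wait Poll used further down in this file:

package main

import (
	"errors"
	"fmt"
	"time"
)

// conditionFunc mirrors the shape of wait.ConditionFunc: done?, fatal error?
type conditionFunc func() (bool, error)

// poll keeps invoking cond every interval until it reports done, a fatal
// error occurs, or the timeout expires.
func poll(interval, timeout time.Duration, cond conditionFunc) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	attempts := 0
	// Succeed on the third try, standing in for a scale that first hits
	// update conflicts and then goes through.
	cond := func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	}
	if err := poll(10*time.Millisecond, time.Second, cond); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("scaled after", attempts, "attempts")
}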
@@ -126,10 +126,10 @@ func ResizeCondition(r Resizer, precondition *ResizePrecondition, namespace, nam
}
}

func (resizer *ReplicationControllerResizer) ResizeSimple(namespace, name string, preconditions *ResizePrecondition, newSize uint) (string, error) {
controller, err := resizer.c.GetReplicationController(namespace, name)
func (scaler *ReplicationControllerScaler) ScaleSimple(namespace, name string, preconditions *ScalePrecondition, newSize uint) (string, error) {
controller, err := scaler.c.GetReplicationController(namespace, name)
if err != nil {
return "", ControllerResizeError{ControllerResizeGetFailure, "Unknown", err}
return "", ControllerScaleError{ControllerScaleGetFailure, "Unknown", err}
}
if preconditions != nil {
if err := preconditions.Validate(controller); err != nil {
@@ -138,60 +138,60 @@ func (resizer *ReplicationControllerResizer) ResizeSimple(namespace, name string
}
controller.Spec.Replicas = int(newSize)
// TODO: do retry on 409 errors here?
if _, err := resizer.c.UpdateReplicationController(namespace, controller); err != nil {
return "", ControllerResizeError{ControllerResizeUpdateFailure, controller.ResourceVersion, err}
if _, err := scaler.c.UpdateReplicationController(namespace, controller); err != nil {
return "", ControllerScaleError{ControllerScaleUpdateFailure, controller.ResourceVersion, err}
}
// TODO: do a better job of printing objects here.
return "resized", nil
return "scaled", nil
}

// Resize updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil),
// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for it's replica count to reach the new value
// (if wait is not nil).
func (resizer *ReplicationControllerResizer) Resize(namespace, name string, newSize uint, preconditions *ResizePrecondition, retry, waitForReplicas *RetryParams) error {
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
if preconditions == nil {
preconditions = &ResizePrecondition{-1, ""}
preconditions = &ScalePrecondition{-1, ""}
}
if retry == nil {
// Make it try only once, immediately
retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
}
cond := ResizeCondition(resizer, preconditions, namespace, name, newSize)
cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
return err
}
if waitForReplicas != nil {
rc := &api.ReplicationController{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: name}}
return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
resizer.c.ControllerHasDesiredReplicas(rc))
scaler.c.ControllerHasDesiredReplicas(rc))
}
return nil
}

// ResizerClient abstracts access to ReplicationControllers.
type ResizerClient interface {
// ScalerClient abstracts access to ReplicationControllers.
type ScalerClient interface {
GetReplicationController(namespace, name string) (*api.ReplicationController, error)
UpdateReplicationController(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error)
ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc
}

func NewResizerClient(c client.Interface) ResizerClient {
return &realResizerClient{c}
func NewScalerClient(c client.Interface) ScalerClient {
return &realScalerClient{c}
}

// realResizerClient is a ResizerClient which uses a Kube client.
type realResizerClient struct {
// realScalerClient is a ScalerClient which uses a Kube client.
type realScalerClient struct {
client client.Interface
}

func (c *realResizerClient) GetReplicationController(namespace, name string) (*api.ReplicationController, error) {
func (c *realScalerClient) GetReplicationController(namespace, name string) (*api.ReplicationController, error) {
return c.client.ReplicationControllers(namespace).Get(name)
}

func (c *realResizerClient) UpdateReplicationController(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) {
func (c *realScalerClient) UpdateReplicationController(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) {
return c.client.ReplicationControllers(namespace).Update(rc)
}

func (c *realResizerClient) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc {
func (c *realScalerClient) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc {
return client.ControllerHasDesiredReplicas(c.client, rc)
}
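Because the scaler only depends on the three-method ScalerClient interface above, scaling logic can be exercised without an API server. A sketch of an in-memory fake written against the signatures in this hunk (it assumes this source tree's api and util/wait packages and is illustrative only; the tests below achieve the same thing with testclient fakes):

package example

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
)

// fakeScalerClient keeps replication controllers in a map keyed by
// "namespace/name" and satisfies the ScalerClient interface above.
type fakeScalerClient struct {
	controllers map[string]*api.ReplicationController
}

func (f *fakeScalerClient) GetReplicationController(namespace, name string) (*api.ReplicationController, error) {
	rc, ok := f.controllers[namespace+"/"+name]
	if !ok {
		return nil, fmt.Errorf("replication controller %s/%s not found", namespace, name)
	}
	return rc, nil
}

func (f *fakeScalerClient) UpdateReplicationController(namespace string, rc *api.ReplicationController) (*api.ReplicationController, error) {
	f.controllers[namespace+"/"+rc.ObjectMeta.Name] = rc
	return rc, nil
}

func (f *fakeScalerClient) ControllerHasDesiredReplicas(rc *api.ReplicationController) wait.ConditionFunc {
	// The fake applies updates synchronously, so the desired state is
	// reached as soon as the update lands.
	return func() (bool, error) { return true, nil }
}

A ReplicationControllerScaler built on such a fake behaves like the client-backed scaler for unit-testing purposes, which is the same design the error-injecting fakes in the next hunks rely on.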
@@ -41,37 +41,37 @@ func (c *ErrorReplicationControllerClient) ReplicationControllers(namespace stri
return &ErrorReplicationControllers{testclient.FakeReplicationControllers{Fake: &c.Fake, Namespace: namespace}}
}

func TestReplicationControllerResizeRetry(t *testing.T) {
func TestReplicationControllerScaleRetry(t *testing.T) {
fake := &ErrorReplicationControllerClient{Fake: testclient.Fake{}}
resizer := ReplicationControllerResizer{NewResizerClient(fake)}
preconditions := ResizePrecondition{-1, ""}
scaler := ReplicationControllerScaler{NewScalerClient(fake)}
preconditions := ScalePrecondition{-1, ""}
count := uint(3)
name := "foo"
namespace := "default"

resizeFunc := ResizeCondition(&resizer, &preconditions, namespace, name, count)
pass, err := resizeFunc()
scaleFunc := ScaleCondition(&scaler, &preconditions, namespace, name, count)
pass, err := scaleFunc()
if pass != false {
t.Errorf("Expected an update failure to return pass = false, got pass = %v", pass)
}
if err != nil {
t.Errorf("Did not expect an error on update failure, got %v", err)
}
preconditions = ResizePrecondition{3, ""}
resizeFunc = ResizeCondition(&resizer, &preconditions, namespace, name, count)
pass, err = resizeFunc()
preconditions = ScalePrecondition{3, ""}
scaleFunc = ScaleCondition(&scaler, &preconditions, namespace, name, count)
pass, err = scaleFunc()
if err == nil {
t.Errorf("Expected error on precondition failure")
}
}

func TestReplicationControllerResize(t *testing.T) {
func TestReplicationControllerScale(t *testing.T) {
fake := &testclient.Fake{}
resizer := ReplicationControllerResizer{NewResizerClient(fake)}
preconditions := ResizePrecondition{-1, ""}
scaler := ReplicationControllerScaler{NewScalerClient(fake)}
preconditions := ScalePrecondition{-1, ""}
count := uint(3)
name := "foo"
resizer.Resize("default", name, count, &preconditions, nil, nil)
scaler.Scale("default", name, count, &preconditions, nil, nil)

if len(fake.Actions) != 2 {
t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions)
@@ -84,17 +84,17 @@ func TestReplicationControllerResize(t *testing.T) {
}
}

func TestReplicationControllerResizeFailsPreconditions(t *testing.T) {
func TestReplicationControllerScaleFailsPreconditions(t *testing.T) {
fake := testclient.NewSimpleFake(&api.ReplicationController{
Spec: api.ReplicationControllerSpec{
Replicas: 10,
},
})
resizer := ReplicationControllerResizer{NewResizerClient(fake)}
preconditions := ResizePrecondition{2, ""}
scaler := ReplicationControllerScaler{NewScalerClient(fake)}
preconditions := ScalePrecondition{2, ""}
count := uint(3)
name := "foo"
resizer.Resize("default", name, count, &preconditions, nil, nil)
scaler.Scale("default", name, count, &preconditions, nil, nil)

if len(fake.Actions) != 1 {
t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions)
@@ -106,18 +106,18 @@ func TestReplicationControllerResizeFailsPreconditions(t *testing.T) {

func TestPreconditionValidate(t *testing.T) {
tests := []struct {
preconditions ResizePrecondition
preconditions ScalePrecondition
controller api.ReplicationController
expectError bool
test string
}{
{
preconditions: ResizePrecondition{-1, ""},
preconditions: ScalePrecondition{-1, ""},
expectError: false,
test: "defaults",
},
{
preconditions: ResizePrecondition{-1, ""},
preconditions: ScalePrecondition{-1, ""},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "foo",
@@ -130,7 +130,7 @@ func TestPreconditionValidate(t *testing.T) {
test: "defaults 2",
},
{
preconditions: ResizePrecondition{0, ""},
preconditions: ScalePrecondition{0, ""},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "foo",
@@ -143,7 +143,7 @@ func TestPreconditionValidate(t *testing.T) {
test: "size matches",
},
{
preconditions: ResizePrecondition{-1, "foo"},
preconditions: ScalePrecondition{-1, "foo"},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "foo",
@@ -156,7 +156,7 @@ func TestPreconditionValidate(t *testing.T) {
test: "resource version matches",
},
{
preconditions: ResizePrecondition{10, "foo"},
preconditions: ScalePrecondition{10, "foo"},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "foo",
@@ -169,7 +169,7 @@ func TestPreconditionValidate(t *testing.T) {
test: "both match",
},
{
preconditions: ResizePrecondition{10, "foo"},
preconditions: ScalePrecondition{10, "foo"},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "foo",
@@ -182,7 +182,7 @@ func TestPreconditionValidate(t *testing.T) {
test: "size different",
},
{
preconditions: ResizePrecondition{10, "foo"},
preconditions: ScalePrecondition{10, "foo"},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "bar",
@@ -195,7 +195,7 @@ func TestPreconditionValidate(t *testing.T) {
test: "version different",
},
{
preconditions: ResizePrecondition{10, "foo"},
preconditions: ScalePrecondition{10, "foo"},
controller: api.ReplicationController{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "bar",
@@ -82,13 +82,13 @@ type objInterface interface {

func (reaper *ReplicationControllerReaper) Stop(namespace, name string, gracePeriod *api.DeleteOptions) (string, error) {
rc := reaper.ReplicationControllers(namespace)
resizer, err := ResizerFor("ReplicationController", NewResizerClient(*reaper))
scaler, err := ScalerFor("ReplicationController", NewScalerClient(*reaper))
if err != nil {
return "", err
}
retry := NewRetryParams(reaper.pollInterval, reaper.timeout)
waitForReplicas := NewRetryParams(reaper.pollInterval, reaper.timeout)
if err = resizer.Resize(namespace, name, 0, nil, retry, waitForReplicas); err != nil {
if err = scaler.Scale(namespace, name, 0, nil, retry, waitForReplicas); err != nil {
return "", err
}
if err := rc.Delete(name); err != nil {
@@ -88,10 +88,10 @@ var _ = Describe("kubectl", func() {
runKubectl("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns))
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling down the replication controller")
runKubectl("resize", "rc", "update-demo-nautilus", "--replicas=1", fmt.Sprintf("--namespace=%v", ns))
runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=1", fmt.Sprintf("--namespace=%v", ns))
validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling up the replication controller")
runKubectl("resize", "rc", "update-demo-nautilus", "--replicas=2", fmt.Sprintf("--namespace=%v", ns))
runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=2", fmt.Sprintf("--namespace=%v", ns))
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})

@@ -112,20 +112,20 @@ func computeRCCounts(total int) (int, int, int) {
return smallRCCount, mediumRCCount, bigRCCount
}

// The function creates a RC and then every few second resize it and with 0.1 probability deletes it.
// The function creates a RC and then every few second scale it and with 0.1 probability deletes it.
func playWithRC(c *client.Client, wg *sync.WaitGroup, ns, name string, size int) {
defer GinkgoRecover()
defer wg.Done()
rcExist := false
// Once every 1-2 minutes perform resize of RC.
// Once every 1-2 minutes perform scale of RC.
for start := time.Now(); time.Since(start) < simulationTime; time.Sleep(time.Duration(60+rand.Intn(60)) * time.Second) {
if !rcExist {
expectNoError(RunRC(c, name, ns, image, size), fmt.Sprintf("creating rc %s in namespace %s", name, ns))
rcExist = true
}
// Resize RC to a random size between 0.5x and 1.5x of the original size.
// Scale RC to a random size between 0.5x and 1.5x of the original size.
newSize := uint(rand.Intn(size+1) + size/2)
expectNoError(ResizeRC(c, ns, name, newSize), fmt.Sprintf("resizing rc %s in namespace %s", name, ns))
expectNoError(ScaleRC(c, ns, name, newSize), fmt.Sprintf("scaling rc %s in namespace %s", name, ns))
// With probability 0.1 remove this RC.
if rand.Intn(10) == 0 {
expectNoError(DeleteRC(c, ns, name), fmt.Sprintf("deleting rc %s in namespace %s", name, ns))
@@ -792,14 +792,14 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
return nil
}

func ResizeRC(c *client.Client, ns, name string, size uint) error {
By(fmt.Sprintf("Resizing replication controller %s in namespace %s to %d", name, ns, size))
resizer, err := kubectl.ResizerFor("ReplicationController", kubectl.NewResizerClient(c))
func ScaleRC(c *client.Client, ns, name string, size uint) error {
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(c))
if err != nil {
return err
}
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = resizer.Resize(ns, name, size, nil, nil, waitForReplicas); err != nil {
if err = scaler.Scale(ns, name, size, nil, nil, waitForReplicas); err != nil {
return err
}
return waitForRCPodsRunning(c, ns, name)
@@ -42,7 +42,7 @@ import (
)

const (
// Timeout used in benchmarks, to eg: resize an rc
// Timeout used in benchmarks, to eg: scale an rc
DefaultTimeout = 30 * time.Minute

// Rc manifest used to create pods for benchmarks.
@@ -191,26 +191,26 @@ func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
return nil
}

// ResizeRC resizes the given rc to the given replicas.
func ResizeRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
resizer, err := kubectl.ResizerFor("ReplicationController", kubectl.NewResizerClient(restClient))
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(restClient))
if err != nil {
return nil, err
}
retry := &kubectl.RetryParams{50 * time.Millisecond, DefaultTimeout}
waitForReplicas := &kubectl.RetryParams{50 * time.Millisecond, DefaultTimeout}
err = resizer.Resize(ns, name, uint(replicas), nil, retry, waitForReplicas)
err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
if err != nil {
return nil, err
}
resized, err := restClient.ReplicationControllers(ns).Get(name)
scaled, err := restClient.ReplicationControllers(ns).Get(name)
if err != nil {
return nil, err
}
return resized, nil
return scaled, nil
}

// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's resizer.
// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's scaler.
func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
if err != nil {
@@ -221,11 +221,11 @@ func StartRC(controller *api.ReplicationController, restClient *client.Client) (
}
}
// If we just created an rc, wait till it creates its replicas.
return ResizeRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
}

// StartPods check for numPods in TestNS. If they exist, it no-ops, otherwise it starts up
// a temp rc, resizes it to match numPods, then deletes the rc leaving behind the pods.
// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
start := time.Now()
defer func() {