diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index edeac65e81b..0969a1d22bb 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -8246,7 +8246,7 @@ "type": "string", "paramType": "query", "name": "sinceTime", - "description": "An RFC3339 timestamp from which to show logs. If this value preceeds the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "description": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "required": false, "allowMultiple": false }, @@ -17236,7 +17236,7 @@ }, "v1.ServicePort": { "id": "v1.ServicePort", - "description": "ServicePort conatins information on service's port.", + "description": "ServicePort contains information on service's port.", "required": [ "port" ], diff --git a/build/common.sh b/build/common.sh index 7fd671834f5..4c2b8b26e7f 100755 --- a/build/common.sh +++ b/build/common.sh @@ -1562,7 +1562,7 @@ function kube::release::docker::release() { kube::log::status "Pushing ${binary} to ${docker_target}" "${docker_push_cmd[@]}" push "${docker_target}" - # If we have a amd64 docker image. Tag it without -amd64 also and push it for compability with earlier versions + # If we have a amd64 docker image. Tag it without -amd64 also and push it for compatibility with earlier versions if [[ ${arch} == "amd64" ]]; then local legacy_docker_target="${KUBE_DOCKER_REGISTRY}/${binary}:${KUBE_DOCKER_IMAGE_TAG}" diff --git a/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml index 49c3da1acfc..0e7b1b05c95 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml +++ b/cluster/addons/cluster-loadbalancing/glbc/glbc-controller.yaml @@ -25,7 +25,7 @@ spec: terminationGracePeriodSeconds: 600 containers: - name: default-http-backend - # Any image is permissable as long as: + # Any image is permissible as long as: # 1. It serves a 404 page at / # 2. It serves 200 on a /healthz endpoint image: gcr.io/google_containers/defaultbackend:1.0 diff --git a/cluster/addons/registry/README.md b/cluster/addons/registry/README.md index 42f56df0d2a..9faacf6984d 100644 --- a/cluster/addons/registry/README.md +++ b/cluster/addons/registry/README.md @@ -60,7 +60,7 @@ created independently - this is not something Kubernetes manages for you (yet). ### I don't want or don't have persistent storage If you are running in a place that doesn't have networked storage, or if you -just want to kick the tires on this without commiting to it, you can easily +just want to kick the tires on this without committing to it, you can easily adapt the `ReplicationController` specification below to use a simple `emptyDir` volume instead of a `persistentVolumeClaim`. 
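For illustration only, a minimal sketch of that substitution in the pod template's `volumes` section; the volume and claim names here are hypothetical, not taken from the addon manifests:

```yaml
volumes:
- name: image-store
  # Instead of binding to a claim...
  # persistentVolumeClaim:
  #   claimName: kube-registry-pvc
  # ...use an emptyDir, which needs no pre-provisioned storage:
  emptyDir: {}
```

Keep in mind that an `emptyDir` volume is erased when its pod is removed, so pushed images will not survive the registry pod being rescheduled.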
diff --git a/cluster/aws/util.sh b/cluster/aws/util.sh index 7052cdb285d..05142502f4b 100755 --- a/cluster/aws/util.sh +++ b/cluster/aws/util.sh @@ -53,7 +53,7 @@ if [[ "${KUBE_OS_DISTRIBUTION}" == "ubuntu" ]]; then KUBE_OS_DISTRIBUTION=vivid fi -# For GCE script compatability +# For GCE script compatibility OS_DISTRIBUTION=${KUBE_OS_DISTRIBUTION} case "${KUBE_OS_DISTRIBUTION}" in diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml index 390225822b1..4a6f0362532 100644 --- a/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml @@ -25,7 +25,7 @@ spec: terminationGracePeriodSeconds: 600 containers: - name: default-http-backend - # Any image is permissable as long as: + # Any image is permissible as long as: # 1. It serves a 404 page at / # 2. It serves 200 on a /healthz endpoint image: gcr.io/google_containers/defaultbackend:1.0 diff --git a/cluster/gce/trusty/helper.sh b/cluster/gce/trusty/helper.sh index 206f3237d44..32349c26244 100755 --- a/cluster/gce/trusty/helper.sh +++ b/cluster/gce/trusty/helper.sh @@ -18,7 +18,7 @@ # The code and configuration is for running node instances on Ubuntu images. # The master is still on Debian. In addition, the configuration is based on -# upstart, which is in Ubuntu upto 14.04 LTS (Trusty). Ubuntu 15.04 and above +# upstart, which is in Ubuntu up to 14.04 LTS (Trusty). Ubuntu 15.04 and above # replaced upstart with systemd as the init system. Consequently, the # configuration cannot work on these images. diff --git a/cluster/images/etcd/Makefile b/cluster/images/etcd/Makefile index c301a97df99..0ba6edacbfb 100644 --- a/cluster/images/etcd/Makefile +++ b/cluster/images/etcd/Makefile @@ -46,14 +46,14 @@ build: docker build -t ${REGISTRY}/etcd-${ARCH}:${TAG} ${TEMP_DIR} ifeq ($(ARCH),amd64) - # Backward compatability. TODO: deprecate this image tag + # Backward compatibility. TODO: deprecate this image tag docker tag -f ${REGISTRY}/etcd-${ARCH}:${TAG} ${REGISTRY}/etcd:${TAG} endif push: build gcloud docker push ${REGISTRY}/etcd-${ARCH}:${TAG} - # Backward compatability. TODO: deprecate this image tag + # Backward compatibility. TODO: deprecate this image tag ifeq ($(ARCH),amd64) gcloud docker push ${REGISTRY}/etcd:${TAG} endif diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index 7a48458d8fd..89202c3748b 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -41,7 +41,7 @@ endif cd ${TEMP_DIR} && sed -i "s/ARCH/${ARCH}/g" master-multi.json master.json kube-proxy.json cd ${TEMP_DIR} && sed -i "s/BASEIMAGE/${BASEIMAGE}/g" Dockerfile docker build -t ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ${TEMP_DIR} - # Backward compatability. TODO: deprecate this image tag + # Backward compatibility. TODO: deprecate this image tag ifeq ($(ARCH),amd64) docker tag -f ${REGISTRY}/hyperkube-${ARCH}:${VERSION} ${REGISTRY}/hyperkube:${VERSION} endif diff --git a/cluster/juju/util.sh b/cluster/juju/util.sh index 4d2f8078c45..ea0c629a29c 100755 --- a/cluster/juju/util.sh +++ b/cluster/juju/util.sh @@ -128,7 +128,7 @@ function kube-down() { } function prepare-e2e() { - echo "prepare-e2e() The Juju provider does not need any preperations for e2e." 1>&2 + echo "prepare-e2e() The Juju provider does not need any preparations for e2e." 
1>&2 } function sleep-status() { diff --git a/cluster/ubuntu/reconfDocker.sh b/cluster/ubuntu/reconfDocker.sh index 5e7b1cb78dc..009b871d6f9 100755 --- a/cluster/ubuntu/reconfDocker.sh +++ b/cluster/ubuntu/reconfDocker.sh @@ -72,6 +72,6 @@ elif [[ $1 == "ai" ]]; then elif [[ $1 == "a" ]]; then config_etcd else - echo "Another arguement is required." + echo "Another argument is required." exit 1 fi diff --git a/cmd/kubelet/app/options/options.go b/cmd/kubelet/app/options/options.go index bcf30281277..f834956c475 100644 --- a/cmd/kubelet/app/options/options.go +++ b/cmd/kubelet/app/options/options.go @@ -182,7 +182,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.DurationVar(&s.StreamingConnectionIdleTimeout.Duration, "streaming-connection-idle-timeout", s.StreamingConnectionIdleTimeout.Duration, "Maximum time a streaming connection can be idle before the connection is automatically closed. 0 indicates no timeout. Example: '5m'") fs.DurationVar(&s.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", s.NodeStatusUpdateFrequency.Duration, "Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. Default: 10s") bindableNodeLabels := util.ConfigurationMap(s.NodeLabels) - fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. Labels must are key=value pairs seperated by ','.") + fs.Var(&bindableNodeLabels, "node-labels", " Labels to add when registering the node in the cluster. Labels must are key=value pairs separated by ','.") fs.DurationVar(&s.ImageMinimumGCAge.Duration, "minimum-image-ttl-duration", s.ImageMinimumGCAge.Duration, "Minimum age for a unused image before it is garbage collected. Examples: '300ms', '10s' or '2h45m'. Default: '2m'") fs.IntVar(&s.ImageGCHighThresholdPercent, "image-gc-high-threshold", s.ImageGCHighThresholdPercent, "The percent of disk usage after which image garbage collection is always run. Default: 90%") fs.IntVar(&s.ImageGCLowThresholdPercent, "image-gc-low-threshold", s.ImageGCLowThresholdPercent, "The percent of disk usage before which image garbage collection is never run. Lowest disk usage to garbage collect to. Default: 80%") diff --git a/cmd/libs/go2idl/generator/default_package.go b/cmd/libs/go2idl/generator/default_package.go index 77f20bbb291..2e6d29b5a8a 100644 --- a/cmd/libs/go2idl/generator/default_package.go +++ b/cmd/libs/go2idl/generator/default_package.go @@ -20,7 +20,7 @@ import ( "k8s.io/kubernetes/cmd/libs/go2idl/types" ) -// DefaultPackage contains a default implentation of Package. +// DefaultPackage contains a default implementation of Package. type DefaultPackage struct { // Short name of package, used in the "package xxxx" line. PackageName string diff --git a/cmd/libs/go2idl/go-to-protobuf/protobuf/package.go b/cmd/libs/go2idl/go-to-protobuf/protobuf/package.go index 1fae676416d..5a42ca504b5 100644 --- a/cmd/libs/go2idl/go-to-protobuf/protobuf/package.go +++ b/cmd/libs/go2idl/go-to-protobuf/protobuf/package.go @@ -52,7 +52,7 @@ func newProtobufPackage(packagePath, packageName string, generateAll bool, omitF return pkg } -// protobufPackage contains the protobuf implentation of Package. +// protobufPackage contains the protobuf implementation of Package. type protobufPackage struct { // Short name of package, used in the "package xxxx" line. 
PackageName string diff --git a/contrib/mesos/pkg/executor/messages/messages.go b/contrib/mesos/pkg/executor/messages/messages.go index 9f0e20ef7b2..dca1aa015bc 100644 --- a/contrib/mesos/pkg/executor/messages/messages.go +++ b/contrib/mesos/pkg/executor/messages/messages.go @@ -29,7 +29,7 @@ const ( TaskKilled = "task-killed" TaskLost = "task-lost" UnmarshalTaskDataFailure = "unmarshal-task-data-failure" - TaskLostAck = "task-lost-ack" // executor acknowledgement of forwarded TASK_LOST framework message + TaskLostAck = "task-lost-ack" // executor acknowledgment of forwarded TASK_LOST framework message Kamikaze = "kamikaze" WrongSlaveFailure = "pod-for-wrong-slave-failure" AnnotationUpdateFailure = "annotation-update-failure" diff --git a/contrib/mesos/pkg/offers/offers.go b/contrib/mesos/pkg/offers/offers.go index 09e35ce1f0c..c30a4332213 100644 --- a/contrib/mesos/pkg/offers/offers.go +++ b/contrib/mesos/pkg/offers/offers.go @@ -271,7 +271,7 @@ func (s *offerStorage) Delete(offerId string, reason metrics.OfferDeclinedReason s.declineOffer(offerId, offer.Host(), reason) } else { // some pod has acquired this and may attempt to launch a task with it - // failed schedule/launch attempts are requried to Release() any claims on the offer + // failed schedule/launch attempts are required to Release() any claims on the offer // TODO(jdef): not sure what a good value is here. the goal is to provide a // launchTasks (driver) operation enough time to complete so that we don't end diff --git a/contrib/mesos/pkg/scheduler/components/framework/framework.go b/contrib/mesos/pkg/scheduler/components/framework/framework.go index 3afb760fb0d..1fe2a1678bf 100644 --- a/contrib/mesos/pkg/scheduler/components/framework/framework.go +++ b/contrib/mesos/pkg/scheduler/components/framework/framework.go @@ -315,7 +315,7 @@ func (k *framework) Registered(drv bindings.SchedulerDriver, fid *mesos.Framewor } // Reregistered is called when the scheduler re-registered with the master successfully. -// This happends when the master fails over. +// This happens when the master fails over. func (k *framework) Reregistered(drv bindings.SchedulerDriver, mi *mesos.MasterInfo) { log.Infof("Scheduler reregistered with the master: %v\n", mi) diff --git a/contrib/mesos/pkg/scheduler/components/podreconciler/doc.go b/contrib/mesos/pkg/scheduler/components/podreconciler/doc.go index e762c815006..c4b8119a123 100644 --- a/contrib/mesos/pkg/scheduler/components/podreconciler/doc.go +++ b/contrib/mesos/pkg/scheduler/components/podreconciler/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package podreconciler implements pod reconcilation of pods which failed +// Package podreconciler implements pod reconciliation of pods which failed // to launch, i.e. before binding by the executor took place. package podreconciler diff --git a/contrib/mesos/pkg/scheduler/components/tasksreconciler/doc.go b/contrib/mesos/pkg/scheduler/components/tasksreconciler/doc.go index e78714794d0..4a24918a6c4 100644 --- a/contrib/mesos/pkg/scheduler/components/tasksreconciler/doc.go +++ b/contrib/mesos/pkg/scheduler/components/tasksreconciler/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package taskreconciler implement Mesos task reconcilation. +// Package taskreconciler implement Mesos task reconciliation. 
package taskreconciler diff --git a/docs/admin/admission-controllers.md b/docs/admin/admission-controllers.md index b903cfc7240..de880e9410c 100644 --- a/docs/admin/admission-controllers.md +++ b/docs/admin/admission-controllers.md @@ -182,7 +182,7 @@ We strongly recommend `NamespaceLifecycle` over `NamespaceAutoProvision`. ### NamespaceLifecycle This plug-in enforces that a `Namespace` that is undergoing termination cannot have new objects created in it, -and ensures that requests in a non-existant `Namespace` are rejected. +and ensures that requests in a non-existent `Namespace` are rejected. A `Namespace` deletion kicks off a sequence of operations that remove all objects (pods, services, etc.) in that namespace. In order to enforce integrity of that process, we strongly recommend running this plug-in. diff --git a/docs/admin/kubelet.md b/docs/admin/kubelet.md index 8f8d0b24427..cd5f478d251 100644 --- a/docs/admin/kubelet.md +++ b/docs/admin/kubelet.md @@ -120,7 +120,7 @@ kubelet --network-plugin="": The name of the network plugin to be invoked for various events in kubelet/pod lifecycle --network-plugin-dir="/usr/libexec/kubernetes/kubelet-plugins/net/exec/": The full path of the directory in which to search for network plugins --node-ip="": IP address of the node. If set, kubelet will use this IP address for the node - --node-labels=: Labels to add when registering the node in the cluster. Labels must are key=value pairs seperated by ','. + --node-labels=: Labels to add when registering the node in the cluster. Labels must are key=value pairs separated by ','. --node-status-update-frequency=10s: Specifies how often kubelet posts node status to master. Note: be cautious when changing the constant, it must work with nodeMonitorGracePeriod in nodecontroller. Default: 10s --non-masquerade-cidr="10.0.0.0/8": Traffic to IPs outside this range will use IP masquerade. --oom-score-adj=-999: The oom-score-adj value for kubelet process. Values must be within the range [-1000, 1000] @@ -152,7 +152,7 @@ kubelet --volume-stats-agg-period=1m0s: Specifies interval for kubelet to calculate and cache the volume disk usage for all pods and volumes. To disable volume calculations, set to 0. Default: '1m' ``` -###### Auto generated by spf13/cobra on 15-Feb-2016 +###### Auto generated by spf13/cobra on 17-Feb-2016 diff --git a/docs/admin/master-node-communication.md b/docs/admin/master-node-communication.md index c86bba8b68d..b775e295430 100644 --- a/docs/admin/master-node-communication.md +++ b/docs/admin/master-node-communication.md @@ -102,7 +102,7 @@ HTTP connections and are therefore neither authenticated nor encrypted. They can be run over a secure HTTPS connection by prefixing `https:` to the node, pod, or service name in the API URL, but they will not validate the certificate provided by the HTTPS endpoint nor provide client credentials so while the -connection will by encrypted, it will not provide any guarentees of integrity. +connection will by encrypted, it will not provide any guarantees of integrity. These connections **are not currently safe** to run over untrusted and/or public networks. diff --git a/docs/api-reference/v1/definitions.html b/docs/api-reference/v1/definitions.html index eaabcb460e2..3af45017f53 100755 --- a/docs/api-reference/v1/definitions.html +++ b/docs/api-reference/v1/definitions.html @@ -5470,7 +5470,7 @@ The resulting set of endpoints can be viewed as:

 v1.ServicePort
-ServicePort conatins information on service’s port.
+ServicePort contains information on service’s port.
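As context for the field description above, a minimal Service manifest using `v1.ServicePort` might look like the following; the names and port numbers are hypothetical:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: example
spec:
  selector:
    app: example
  ports:
  # Each entry is a v1.ServicePort; per the swagger spec, only "port" is required.
  - name: http       # must be a DNS_LABEL, unique within the Service
    protocol: TCP
    port: 80         # port exposed by the Service
    targetPort: 8080 # port on the backing pods
```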

diff --git a/docs/api-reference/v1/operations.html b/docs/api-reference/v1/operations.html index a09910f227b..8a2691eecfd 100755 --- a/docs/api-reference/v1/operations.html +++ b/docs/api-reference/v1/operations.html @@ -8196,7 +8196,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; } - + diff --git a/docs/design/clustering/dynamic.seqdiag b/docs/design/clustering/dynamic.seqdiag index 95bb395e886..567d5bf9606 100644 --- a/docs/design/clustering/dynamic.seqdiag +++ b/docs/design/clustering/dynamic.seqdiag @@ -15,7 +15,7 @@ seqdiag { user ->> kubelet [label="start\n- bootstrap-cluster-uri"]; kubelet => bootstrap [label="get-master", return="returns\n- master-location\n- master-ca"]; - kubelet ->> master [label="signCert\n- unsigned-kubelet-cert", return="retuns\n- kubelet-cert"]; + kubelet ->> master [label="signCert\n- unsigned-kubelet-cert", return="returns\n- kubelet-cert"]; user => master [label="getSignRequests"]; user => master [label="approveSignRequests"]; kubelet <<-- master [label="returns\n- kubelet-cert"]; diff --git a/docs/design/enhance-pluggable-policy.md b/docs/design/enhance-pluggable-policy.md index 1ee9bf29d2c..9cdd9a2d93d 100644 --- a/docs/design/enhance-pluggable-policy.md +++ b/docs/design/enhance-pluggable-policy.md @@ -54,7 +54,7 @@ An API request has the following attributes that can be considered for authoriza - resourceVersion - the API version of the resource being accessed - resource - which resource is being accessed - applies only to the API endpoints, such as - `/api/v1beta1/pods`. For miscelaneous endpoints, like `/version`, the kind is the empty string. + `/api/v1beta1/pods`. For miscellaneous endpoints, like `/version`, the kind is the empty string. - resourceName - the name of the resource during a get, update, or delete action. - subresource - which subresource is being accessed diff --git a/docs/design/indexed-job.md b/docs/design/indexed-job.md index 7b72cc0f288..b928f722fe7 100644 --- a/docs/design/indexed-job.md +++ b/docs/design/indexed-job.md @@ -33,18 +33,18 @@ Documentation for other releases can be found at ## Summary This design extends kubernetes with user-friendly support for -running embarassingly parallel jobs. +running embarrassingly parallel jobs. Here, *parallel* means on multiple nodes, which means multiple pods. -By *embarassingly parallel*, it is meant that the pods +By *embarrassingly parallel*, it is meant that the pods have no dependencies between each other. In particular, neither ordering between pods nor gang scheduling are supported. -Users already have two other options for running embarassingly parallel +Users already have two other options for running embarrassingly parallel Jobs (described in the next section), but both have ease-of-use issues. Therefore, this document proposes extending the Job resource type to support -a third way to run embarassingly parallel programs, with a focus on +a third way to run embarrassingly parallel programs, with a focus on ease of use. This new style of Job is called an *indexed job*, because each Pod of the Job @@ -53,7 +53,7 @@ is specialized to work on a particular *index* from a fixed length array of work ## Background The Kubernetes [Job](../../docs/user-guide/jobs.md) already supports -the embarassingly parallel use case through *workqueue jobs*. +the embarrassingly parallel use case through *workqueue jobs*. While [workqueue jobs](../../docs/user-guide/jobs.md#job-patterns) are very flexible, they can be difficult to use. 
They: (1) typically require running a message queue @@ -242,7 +242,7 @@ In the above example: - `--restart=OnFailure` implies creating a job instead of replicationController. - Each pods command line is `/usr/local/bin/process_file $F`. - `--per-completion-env=` implies the jobs `.spec.completions` is set to the length of the argument array (3 in the example). -- `--per-completion-env=F=` causes env var with `F` to be available in the enviroment when the command line is evaluated. +- `--per-completion-env=F=` causes env var with `F` to be available in the environment when the command line is evaluated. How exactly this happens is discussed later in the doc: this is a sketch of the user experience. @@ -269,7 +269,7 @@ Another case we do not try to handle is where the input file does not exist yet #### Multiple parameters -The user may also have multiple paramters, like in [work list 2](#work-list-2). +The user may also have multiple parameters, like in [work list 2](#work-list-2). One way is to just list all the command lines already expanded, one per line, in a file, like this: ``` @@ -491,7 +491,7 @@ The index-only approach: - requires that the user keep the *per completion parameters* in a separate storage, such as a configData or networked storage. - makes no changes to the JobSpec. - Drawback: while in separate storage, they could be mutatated, which would have unexpected effects -- Drawback: Logic for using index to lookup paramters needs to be in the Pod. +- Drawback: Logic for using index to lookup parameters needs to be in the Pod. - Drawback: CLIs and UIs are limited to using the "index" as the identity of a pod from a job. They cannot easily say, for example `repeated failures on the pod processing banana.txt`. diff --git a/docs/design/taint-toleration-dedicated.md b/docs/design/taint-toleration-dedicated.md index cca2ee448e6..7eb37da9441 100644 --- a/docs/design/taint-toleration-dedicated.md +++ b/docs/design/taint-toleration-dedicated.md @@ -51,7 +51,7 @@ on a node but do not prevent it, taints that prevent a pod from starting on Kube if the pod's `NodeName` was written directly (i.e. pod did not go through the scheduler), and taints that evict already-running pods. [This comment](https://github.com/kubernetes/kubernetes/issues/3885#issuecomment-146002375) -has more background on these diffrent scenarios. We will focus on the first +has more background on these different scenarios. We will focus on the first kind of taint in this doc, since it is the kind required for the "dedicated nodes" use case. Implementing dedicated nodes using taints and tolerations is straightforward: in essence, a node that @@ -264,7 +264,7 @@ their taint. Thus we need to ensure that a new node does not become "Ready" unti configured with its taints. One way to do this is to have an admission controller that adds the taint whenever a Node object is created. -A quota policy may want to treat nodes diffrently based on what taints, if any, +A quota policy may want to treat nodes differently based on what taints, if any, they have. For example, if a particular namespace is only allowed to access dedicated nodes, then it may be convenient to give the namespace unlimited quota. (To use finite quota, you'd have to size the namespace's quota to the sum of the sizes of the machines in the diff --git a/docs/devel/README.md b/docs/devel/README.md index 4128e00179e..727fbfa6adb 100644 --- a/docs/devel/README.md +++ b/docs/devel/README.md @@ -79,7 +79,7 @@ Guide](../admin/README.md). 
Document style advice for contributors. * **Running a cluster locally** ([running-locally.md](running-locally.md)): - A fast and lightweight local cluster deployment for developement. + A fast and lightweight local cluster deployment for development. ## Developing against the Kubernetes API diff --git a/docs/devel/api_changes.md b/docs/devel/api_changes.md index 0c039aab187..585f015d9e7 100644 --- a/docs/devel/api_changes.md +++ b/docs/devel/api_changes.md @@ -306,7 +306,7 @@ removed value as deprecated but allowed). This is actually a special case of a new representation, discussed above. For [Unions](api-conventions.md), sets of fields where at most one should be set, -it is acceptible to add a new option to the union if the [appropriate conventions] +it is acceptable to add a new option to the union if the [appropriate conventions] were followed in the original object. Removing an option requires following the deprecation process. diff --git a/docs/devel/e2e-tests.md b/docs/devel/e2e-tests.md index a9c440c73ef..0c75af705c2 100644 --- a/docs/devel/e2e-tests.md +++ b/docs/devel/e2e-tests.md @@ -36,7 +36,7 @@ Documentation for other releases can be found at ## Overview -End-to-end (e2e) tests for Kubernetes provide a mechanism to test end-to-end behavior of the system, and is the last signal to ensure end user operations match developer specifications. Although unit and integration tests should ideally provide a good signal, the reality is in a distributed system like Kubernetes it is not uncommon that a minor change may pass all unit and integration tests, but cause unforseen changes at the system level. e2e testing is very costly, both in time to run tests and difficulty debugging, though: it takes a long time to build, deploy, and exercise a cluster. Thus, the primary objectives of the e2e tests are to ensure a consistent and reliable behavior of the kubernetes code base, and to catch hard-to-test bugs before users do, when unit and integration tests are insufficient. +End-to-end (e2e) tests for Kubernetes provide a mechanism to test end-to-end behavior of the system, and is the last signal to ensure end user operations match developer specifications. Although unit and integration tests should ideally provide a good signal, the reality is in a distributed system like Kubernetes it is not uncommon that a minor change may pass all unit and integration tests, but cause unforeseen changes at the system level. e2e testing is very costly, both in time to run tests and difficulty debugging, though: it takes a long time to build, deploy, and exercise a cluster. Thus, the primary objectives of the e2e tests are to ensure a consistent and reliable behavior of the kubernetes code base, and to catch hard-to-test bugs before users do, when unit and integration tests are insufficient. The e2e tests in kubernetes are built atop of [Ginkgo](http://onsi.github.io/ginkgo/) and [Gomega](http://onsi.github.io/gomega/). There are a host of features that this BDD testing framework provides, and it is recommended that the developer read the documentation prior to diving into the tests. 
diff --git a/docs/devel/kubemark-guide.md b/docs/devel/kubemark-guide.md index c2addc8fbc9..8edc4b0a427 100644 --- a/docs/devel/kubemark-guide.md +++ b/docs/devel/kubemark-guide.md @@ -89,7 +89,7 @@ to update docker image address if you’re not using GCR and default image name* - Waits until all HollowNodes are in the Running phase (*will work exactly the same everywhere*) \* Port 443 is a secured port on the master machine which is used for all external communication with the API server. In the last sentence *external* means all traffic -comming from other machines, including all the Nodes, not only from outside of the cluster. Currently local components, i.e. ControllerManager and Scheduler talk with API server using insecure port 8080. +coming from other machines, including all the Nodes, not only from outside of the cluster. Currently local components, i.e. ControllerManager and Scheduler talk with API server using insecure port 8080. ### Running e2e tests on Kubemark cluster diff --git a/docs/devel/update-release-docs.md b/docs/devel/update-release-docs.md index e94c5442574..e0c04047baa 100644 --- a/docs/devel/update-release-docs.md +++ b/docs/devel/update-release-docs.md @@ -104,7 +104,7 @@ The high level steps to update docs in an existing collection are: ## Updating docs on HEAD [Development guide](development.md) provides general instructions on how to contribute to kubernetes github repo. -[Docs how to guide](how-to-doc.md) provides conventions to follow while writting docs. +[Docs how to guide](how-to-doc.md) provides conventions to follow while writing docs. ## Updating docs in release branch diff --git a/docs/proposals/federated-api-servers.md b/docs/proposals/federated-api-servers.md index 4e575733235..d0c97872df6 100644 --- a/docs/proposals/federated-api-servers.md +++ b/docs/proposals/federated-api-servers.md @@ -47,7 +47,7 @@ federated servers. developers to expose their APIs as a separate server and enabling the cluster admin to use it without any change to the core kubernetes reporsitory, we unblock these APIs. -* Place for staging experimental APIs: New APIs can remain in seperate +* Place for staging experimental APIs: New APIs can remain in separate federated servers until they become stable, at which point, they can be moved to the core kubernetes master, if appropriate. * Ensure that new APIs follow kubernetes conventions: Without the mechanism diff --git a/docs/proposals/federation-lite.md b/docs/proposals/federation-lite.md index 393e0883352..09640bbc6dd 100644 --- a/docs/proposals/federation-lite.md +++ b/docs/proposals/federation-lite.md @@ -37,7 +37,7 @@ Full Ubernetes will offer sophisticated federation between multiple kuberentes clusters, offering true high-availability, multiple provider support & cloud-bursting, multiple region support etc. However, many users have expressed a desire for a "reasonably" high-available cluster, that runs in -multiple zones on GCE or availablity zones in AWS, and can tolerate the failure +multiple zones on GCE or availability zones in AWS, and can tolerate the failure of a single zone without the complexity of running multiple clusters. Ubernetes-Lite aims to deliver exactly that functionality: to run a single @@ -88,7 +88,7 @@ The implementation of this will be described in the implementation section. Note that zone spreading is 'best effort'; zones are just be one of the factors in making scheduling decisions, and thus it is not guaranteed that pods will -spread evenly across zones. 
However, this is likely desireable: if a zone is +spread evenly across zones. However, this is likely desirable: if a zone is overloaded or failing, we still want to schedule the requested number of pods. ### Volume affinity diff --git a/docs/proposals/federation.md b/docs/proposals/federation.md index 2b63bde47ea..f2d1fcd11ca 100644 --- a/docs/proposals/federation.md +++ b/docs/proposals/federation.md @@ -111,7 +111,7 @@ and cheap network within each cluster. There is also assumed to be some degree of failure correlation across a cluster, i.e. whole clusters are expected to fail, at least occasionally (due to cluster-wide power and network failures, natural -disasters etc). Clusters are often relatively homogenous in that all +disasters etc). Clusters are often relatively homogeneous in that all compute nodes are typically provided by a single cloud provider or hardware vendor, and connected by a common, unified network fabric. But these are not hard requirements of Kubernetes. diff --git a/docs/proposals/flannel-integration.md b/docs/proposals/flannel-integration.md index c4cfc4e7c6c..5b12219178a 100644 --- a/docs/proposals/flannel-integration.md +++ b/docs/proposals/flannel-integration.md @@ -129,7 +129,7 @@ The first is accomplished in this PR, while a timeline for 2. and 3. are TDB. To - Get: Handle a watch on leases * `/network/leases/subnet`: - Put: This is a request for a lease. If the nodecontroller is allocating CIDRs we can probably just no-op. -* `/network/reservations`: TDB, we can probably use this to accomodate node controller allocating CIDR instead of flannel requesting it +* `/network/reservations`: TDB, we can probably use this to accommodate node controller allocating CIDR instead of flannel requesting it The ick-iest part of this implementation is going to the the `GET /network/leases`, i.e the watch proxy. We can side-step by waiting for a more generic Kubernetes resource. However, we can also implement it as follows: * Watch all nodes, ignore heartbeats @@ -152,7 +152,7 @@ This proposal is really just a call for community help in writing a Kubernetes x * Flannel daemon in privileged pod * Flannel server talks to apiserver, described in proposal above * HTTPs between flannel daemon/server -* Investigate flannel server runing on every node (as done in the reference implementation mentioned above) +* Investigate flannel server running on every node (as done in the reference implementation mentioned above) * Use flannel reservation mode to support node controller podcidr alloction diff --git a/docs/proposals/node-allocatable.md b/docs/proposals/node-allocatable.md index a3520a41ad0..ec768f8c827 100644 --- a/docs/proposals/node-allocatable.md +++ b/docs/proposals/node-allocatable.md @@ -129,8 +129,8 @@ behavior is equivalent to the 1.1 behavior with scheduling based on Capacity. In the initial implementation, `SystemReserved` will be functionally equivalent to [`KubeReserved`](#system-reserved), but with a different semantic meaning. While KubeReserved -designates resources set asside for kubernetes components, SystemReserved designates resources set -asside for non-kubernetes components (currently this is reported as all the processes lumped +designates resources set aside for kubernetes components, SystemReserved designates resources set +aside for non-kubernetes components (currently this is reported as all the processes lumped together in the `/system` raw container). ## Issues @@ -159,7 +159,7 @@ according to `KubeReserved`. 
**API server expects `Allocatable` but does not receive it:** If the kubelet is older and does not provide `Allocatable` in the `NodeStatus`, then `Allocatable` will be [defaulted](../../pkg/api/v1/defaults.go) to - `Capacity` (which will yield todays behavior of scheduling based on capacity). + `Capacity` (which will yield today's behavior of scheduling based on capacity). ### 3rd party schedulers diff --git a/docs/proposals/performance-related-monitoring.md b/docs/proposals/performance-related-monitoring.md index e6612fb4156..ba07ad24e12 100644 --- a/docs/proposals/performance-related-monitoring.md +++ b/docs/proposals/performance-related-monitoring.md @@ -40,7 +40,7 @@ Main reason behind doing this is to understand what kind of monitoring needs to Issue https://github.com/kubernetes/kubernetes/issues/14216 was opened because @spiffxp observed a regression in scheduler performance in 1.1 branch in comparison to `old` 1.0 cut. In the end it turned out the be caused by `--v=4` (instead of default `--v=2`) flag in the scheduler together with the flag `--logtostderr` which disables batching of -log lines and a number of loging without explicit V level. This caused weird behavior of the whole component. +log lines and a number of logging without explicit V level. This caused weird behavior of the whole component. Because we now know that logging may have big performance impact we should consider instrumenting logging mechanism and compute statistics such as number of logged messages, total and average size of them. Each binary should be responsible for exposing its metrics. An unaccounted but way too big number of days, if not weeks, of engineering time was @@ -137,7 +137,7 @@ Basic ideas: We should monitor other aspects of the system, which may indicate saturation of some component. Basic ideas: -- queue lenght for queues in the system, +- queue length for queues in the system, - wait time for WaitGroups. diff --git a/docs/proposals/selinux.md b/docs/proposals/selinux.md index 27e4087da52..f98ac5daa66 100644 --- a/docs/proposals/selinux.md +++ b/docs/proposals/selinux.md @@ -194,7 +194,7 @@ From the above, we know that label management must be applied: 3. To some volume types *sometimes* Volumes should be relabeled with the correct SELinux context. Docker has this capability today; it -is desireable for other container runtime implementations to provide similar functionality. +is desirable for other container runtime implementations to provide similar functionality. Relabeling should be an optional aspect of a volume plugin to accommodate: diff --git a/docs/proposals/templates.md b/docs/proposals/templates.md index fad8d27b518..b4e9f789393 100644 --- a/docs/proposals/templates.md +++ b/docs/proposals/templates.md @@ -178,7 +178,7 @@ can be instantiated multiple times within the same namespace, as long as a diffe instantiation. The resulting objects will be independent from a replica/load-balancing perspective. Generation of parameter values for fields such as Secrets will be delegated to an [admission controller/initializer/finalizer](https://github.com/kubernetes/kubernetes/issues/3585) rather than being solved by the template processor. Some discussion about a generation -service is occuring [here](https://github.com/kubernetes/kubernetes/issues/12732) +service is occurring [here](https://github.com/kubernetes/kubernetes/issues/12732) Labels to be assigned to all objects could also be generated in addition to, or instead of, allowing labels to be supplied in the Template definition. 
diff --git a/docs/proposals/workflow.md b/docs/proposals/workflow.md index 433b74ccd39..f225b0a1278 100644 --- a/docs/proposals/workflow.md +++ b/docs/proposals/workflow.md @@ -258,7 +258,7 @@ The events associated to `Workflow`s will be: ## Kubectl -Kubectl will be modified to display workflows. More particulary the `describe` command +Kubectl will be modified to display workflows. More particularly the `describe` command will display all the steps with their status. Steps will be topologically sorted and each dependency will be decorated with its status (wether or not step is waitin for dependency). diff --git a/docs/user-guide/ingress.md b/docs/user-guide/ingress.md index ebd5e53b395..6dca12fb9fb 100644 --- a/docs/user-guide/ingress.md +++ b/docs/user-guide/ingress.md @@ -55,7 +55,7 @@ Documentation for other releases can be found at __Terminology__ -Throughout this doc you will see a few terms that are sometimes used interchangably elsewhere, that might cause confusion. This section attempts to clarify them. +Throughout this doc you will see a few terms that are sometimes used interchangeably elsewhere, that might cause confusion. This section attempts to clarify them. * Node: A single virtual or physical machine in a Kubernetes cluster. * Cluster: A group of nodes firewalled from the internet, that are the primary compute resources managed by Kubernetes. diff --git a/docs/user-guide/jobs.md b/docs/user-guide/jobs.md index ea7c2163c9a..5486acff8ec 100644 --- a/docs/user-guide/jobs.md +++ b/docs/user-guide/jobs.md @@ -239,7 +239,7 @@ parallelism, for a variety or reasons: - For Fixed Completion Count jobs, the actual number of pods running in parallel will not exceed the number of remaining completions. Higher values of `.spec.parallelism` are effectively ignored. -- For work queue jobs, no new pods are started after any pod has succeded -- remaining pods are allowed to complete, however. +- For work queue jobs, no new pods are started after any pod has succeeded -- remaining pods are allowed to complete, however. - If the controller has not had time to react. - If the controller failed to create pods for any reason (lack of ResourceQuota, lack of permission, etc), then there may be fewer pods than requested. diff --git a/docs/user-guide/kubectl-overview.md b/docs/user-guide/kubectl-overview.md index 0a0aa7bc194..afac897efaa 100644 --- a/docs/user-guide/kubectl-overview.md +++ b/docs/user-guide/kubectl-overview.md @@ -305,7 +305,7 @@ Use the following set of examples to help you familiarize yourself with running // Return a snapshot of the logs from pod . $ kubectl logs - // Start streaming the logs from pod . This is similiar to the 'tail -f' Linux command. + // Start streaming the logs from pod . This is similar to the 'tail -f' Linux command. $ kubectl logs -f diff --git a/docs/user-guide/replication-controller.md b/docs/user-guide/replication-controller.md index b45f792e882..df7fdbdf60b 100644 --- a/docs/user-guide/replication-controller.md +++ b/docs/user-guide/replication-controller.md @@ -68,7 +68,7 @@ Documentation for other releases can be found at ## What is a _replication controller_? A _replication controller_ ensures that a specified number of pod "replicas" are running at any one -time. In other words, a replication controller makes sure that a pod or homogenous set of pods are +time. In other words, a replication controller makes sure that a pod or homogeneous set of pods are always up and available. If there are too many pods, it will kill some. 
If there are too few, the replication controller will start more. Unlike manually created pods, the pods maintained by a diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index 5320762af87..a12c75877b9 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -108,7 +108,7 @@ They are a core concept used by two additional Kubernetes building blocks: Repli ## Replication Controllers -OK, now you know how to make awesome, multi-container, labeled pods and you want to use them to build an application, you might be tempted to just start building a whole bunch of individual pods, but if you do that, a whole host of operational concerns pop up. For example: how will you scale the number of pods up or down and how will you ensure that all pods are homogenous? +OK, now you know how to make awesome, multi-container, labeled pods and you want to use them to build an application, you might be tempted to just start building a whole bunch of individual pods, but if you do that, a whole host of operational concerns pop up. For example: how will you scale the number of pods up or down and how will you ensure that all pods are homogeneous? Replication controllers are the objects to answer these questions. A replication controller combines a template for pod creation (a "cookie-cutter" if you will) and a number of desired replicas, into a single Kubernetes object. The replication controller also contains a label selector that identifies the set of objects managed by the replication controller. The replication controller constantly measures the size of this set relative to the desired size, and takes action by creating or deleting pods. diff --git a/examples/cassandra/java/src/io/k8s/cassandra/KubernetesSeedProvider.java b/examples/cassandra/java/src/io/k8s/cassandra/KubernetesSeedProvider.java index 58978de96a4..1760f12678c 100644 --- a/examples/cassandra/java/src/io/k8s/cassandra/KubernetesSeedProvider.java +++ b/examples/cassandra/java/src/io/k8s/cassandra/KubernetesSeedProvider.java @@ -117,7 +117,7 @@ public class KubernetesSeedProvider implements SeedProvider { logger.info("Getting endpoints from " + url); HttpsURLConnection conn = (HttpsURLConnection)url.openConnection(); - // TODO: Remove this once the CA cert is propogated everywhere, and replace + // TODO: Remove this once the CA cert is propagated everywhere, and replace // with loading the CA cert. conn.setSSLSocketFactory(ctx.getSocketFactory()); conn.setHostnameVerifier(trustAllHosts); diff --git a/examples/flexvolume/README.md b/examples/flexvolume/README.md index 5f49e2d06fc..df7173e6cdd 100644 --- a/examples/flexvolume/README.md +++ b/examples/flexvolume/README.md @@ -43,7 +43,7 @@ For example to add a 'cifs' driver, by vendor 'foo' install the driver at: /usr/ ## Plugin details -Driver will be invoked with 'Init' to initalize the driver. It will be invoked with 'attach' to attach the volume and with 'detach' to detach the volume from the kubelet node. It also supports custom mounts using 'mount' and 'unmount' callouts to the driver. +Driver will be invoked with 'Init' to initialize the driver. It will be invoked with 'attach' to attach the volume and with 'detach' to detach the volume from the kubelet node. It also supports custom mounts using 'mount' and 'unmount' callouts to the driver. 
### Driver invocation model: diff --git a/examples/javaweb-tomcat-sidecar/README.md b/examples/javaweb-tomcat-sidecar/README.md index 42de090060d..bb87215365f 100644 --- a/examples/javaweb-tomcat-sidecar/README.md +++ b/examples/javaweb-tomcat-sidecar/README.md @@ -41,7 +41,7 @@ This sidecar mode brings a new workflow for Java users: ![](workflow.png?raw=true "Workflow") -As you can see, user can create a `sample:v2` container as sidecar to "provide" war file to Tomcat by copying it to the shared `emptyDir` volume. And Pod will make sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management will be totally seperated from web server management. +As you can see, user can create a `sample:v2` container as sidecar to "provide" war file to Tomcat by copying it to the shared `emptyDir` volume. And Pod will make sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management will be totally separated from web server management. For example, if you are going to change the configurations of your Tomcat: diff --git a/examples/job/work-queue-2/README.md b/examples/job/work-queue-2/README.md index 6645f03f6ce..ad5066312ab 100644 --- a/examples/job/work-queue-2/README.md +++ b/examples/job/work-queue-2/README.md @@ -160,7 +160,7 @@ host="redis" q = rediswq.RedisWQ(name="job2", host="redis") print("Worker with sessionID: " + q.sessionID()) -print("Inital queue state: empty=" + str(q.empty())) +print("Initial queue state: empty=" + str(q.empty())) while not q.empty(): item = q.lease(lease_secs=10, block=True, timeout=2) if item is not None: @@ -283,7 +283,7 @@ Events: $ kubectl logs pods/job-wq-2-7r7b2 Worker with sessionID: bbd72d0a-9e5c-4dd6-abf6-416cc267991f -Inital queue state: empty=False +Initial queue state: empty=False Working on banana Working on date Working on lemon diff --git a/examples/job/work-queue-2/worker.py b/examples/job/work-queue-2/worker.py index ba424147577..6bf2abc5f2d 100755 --- a/examples/job/work-queue-2/worker.py +++ b/examples/job/work-queue-2/worker.py @@ -24,7 +24,7 @@ host="redis" q = rediswq.RedisWQ(name="job2", host="redis") print("Worker with sessionID: " + q.sessionID()) -print("Inital queue state: empty=" + str(q.empty())) +print("Initial queue state: empty=" + str(q.empty())) while not q.empty(): item = q.lease(lease_secs=10, block=True, timeout=2) if item is not None: diff --git a/examples/k8petstore/docker-machine-dev.sh b/examples/k8petstore/docker-machine-dev.sh index 48d8684181b..ebe728e381e 100755 --- a/examples/k8petstore/docker-machine-dev.sh +++ b/examples/k8petstore/docker-machine-dev.sh @@ -64,7 +64,7 @@ function build_containers() { function runk8petstore() { ### Finally, run the application. - ### This app is gauranteed to be a clean run using all the source. + ### This app is guaranteed to be a clean run using all the source. ### You can use it to iteratively test/deploy k8petstore and make new changes. ### TODO, add slaves. diff --git a/examples/k8petstore/k8petstore-loadbalancer.sh b/examples/k8petstore/k8petstore-loadbalancer.sh index 62b2b607cca..0bd55b01068 100755 --- a/examples/k8petstore/k8petstore-loadbalancer.sh +++ b/examples/k8petstore/k8petstore-loadbalancer.sh @@ -27,7 +27,7 @@ _SECONDS=1000 # number of seconds to measure throughput. FE="1" # amount of Web server LG="1" # amount of load generators SLAVE="1" # amount of redis slaves -TEST="1" # 0 = Dont run tests, 1 = Do run tests. 
+TEST="1" # 0 = Don't run tests, 1 = Do run tests. NS="default" # namespace kubectl="${1:-$kubectl}" @@ -36,7 +36,7 @@ _SECONDS="${3:-$_SECONDS}" # number of seconds to measure throughput. FE="${4:-$FE}" # amount of Web server LG="${5:-$LG}" # amount of load generators SLAVE="${6:-$SLAVE}" # amount of redis slaves -TEST="${7:-$TEST}" # 0 = Dont run tests, 1 = Do run tests. +TEST="${7:-$TEST}" # 0 = Don't run tests, 1 = Do run tests. NS="${8:-$NS}" # namespace echo "Running w/ args: kubectl $kubectl version $VERSION sec $_SECONDS fe $FE lg $LG slave $SLAVE test $TEST NAMESPACE $NS" diff --git a/examples/k8petstore/k8petstore-nodeport.sh b/examples/k8petstore/k8petstore-nodeport.sh index 49360409c81..05b1221a170 100755 --- a/examples/k8petstore/k8petstore-nodeport.sh +++ b/examples/k8petstore/k8petstore-nodeport.sh @@ -28,7 +28,7 @@ _SECONDS=1000 # number of seconds to measure throughput. FE="1" # amount of Web server LG="1" # amount of load generators SLAVE="1" # amount of redis slaves -TEST="1" # 0 = Dont run tests, 1 = Do run tests. +TEST="1" # 0 = Don't run tests, 1 = Do run tests. NS="default" # namespace NODE_PORT=30291 #nodePort, see fe-s.json @@ -38,7 +38,7 @@ _SECONDS="${3:-$_SECONDS}" # number of seconds to measure throughput. FE="${4:-$FE}" # amount of Web server LG="${5:-$LG}" # amount of load generators SLAVE="${6:-$SLAVE}" # amount of redis slaves -TEST="${7:-$TEST}" # 0 = Dont run tests, 1 = Do run tests. +TEST="${7:-$TEST}" # 0 = Don't run tests, 1 = Do run tests. NS="${8:-$NS}" # namespace NODE_PORT="${9:-$NODE_PORT}" #nodePort, see fe-s.json echo "Running w/ args: kubectl $kubectl version $VERSION sec $_SECONDS fe $FE lg $LG slave $SLAVE test = $TEST, NAMESPACE = $NS, NODE_PORT = $NODE_PORT" diff --git a/examples/k8petstore/k8petstore.sh b/examples/k8petstore/k8petstore.sh index 285c2ddcc6d..759ba05e4a1 100755 --- a/examples/k8petstore/k8petstore.sh +++ b/examples/k8petstore/k8petstore.sh @@ -28,7 +28,7 @@ _SECONDS=1000 # number of seconds to measure throughput. FE="1" # amount of Web server LG="1" # amount of load generators SLAVE="1" # amount of redis slaves -TEST="1" # 0 = Dont run tests, 1 = Do run tests. +TEST="1" # 0 = Don't run tests, 1 = Do run tests. NS="default" # namespace kubectl="${1:-$kubectl}" @@ -38,7 +38,7 @@ _SECONDS="${4:-$_SECONDS}" # number of seconds to measure throughput. FE="${5:-$FE}" # amount of Web server LG="${6:-$LG}" # amount of load generators SLAVE="${7:-$SLAVE}" # amount of redis slaves -TEST="${8:-$TEST}" # 0 = Dont run tests, 1 = Do run tests. +TEST="${8:-$TEST}" # 0 = Don't run tests, 1 = Do run tests. NS="${9:-$NS}" # namespace echo "Running w/ args: kubectl $kubectl version $VERSION ip $PUBLIC_IP sec $_SECONDS fe $FE lg $LG slave $SLAVE test $TEST NAMESPACE $NS" diff --git a/examples/mysql-galera/README.md b/examples/mysql-galera/README.md index 329cbfef808..266d7ea6bb8 100644 --- a/examples/mysql-galera/README.md +++ b/examples/mysql-galera/README.md @@ -47,7 +47,7 @@ By defaults, there are only three pods (hence replication controllers) for this When the replication controller is created, it results in the corresponding container to start, run an entrypoint script that installs the mysql system tables, set up users, and build up a list of servers that is used with the galera parameter ```wsrep_cluster_address```. This is a list of running nodes that galera uses for election of a node to obtain SST (Single State Transfer) from. 
-Note: Kubernetes best-practices is to pre-create the services for each controller, and the configuration files which contain the service and replication controller for each node, when created, will result in both a service and replication contrller running for the given node. An important thing to know is that it's important that initally pxc-node1.yaml be processed first and no other pxc-nodeN services that don't have corresponding replication controllers should exist. The reason for this is that if there is a node in ```wsrep_clsuter_address``` without a backing galera node there will be nothing to obtain SST from which will cause the node to shut itself down and the container in question to exit (and another soon relaunched, repeatedly). +Note: Kubernetes best-practices is to pre-create the services for each controller, and the configuration files which contain the service and replication controller for each node, when created, will result in both a service and replication contrller running for the given node. An important thing to know is that it's important that initially pxc-node1.yaml be processed first and no other pxc-nodeN services that don't have corresponding replication controllers should exist. The reason for this is that if there is a node in ```wsrep_clsuter_address``` without a backing galera node there will be nothing to obtain SST from which will cause the node to shut itself down and the container in question to exit (and another soon relaunched, repeatedly). First, create the overall cluster service that will be used to connect to the cluster: diff --git a/examples/mysql-galera/image/docker-entrypoint.sh b/examples/mysql-galera/image/docker-entrypoint.sh index 961fc7cd639..58657303b32 100755 --- a/examples/mysql-galera/image/docker-entrypoint.sh +++ b/examples/mysql-galera/image/docker-entrypoint.sh @@ -37,7 +37,7 @@ if [ "$1" = 'mysqld' ]; then DATADIR="$("$@" --verbose --help 2>/dev/null | awk '$1 == "datadir" { print $2; exit }')" # only check if system tables not created from mysql_install_db and permissions - # set with initial SQL script before proceding to build SQL script + # set with initial SQL script before proceeding to build SQL script if [ ! -d "$DATADIR/mysql" ]; then # fail if user didn't supply a root password if [ -z "$MYSQL_ROOT_PASSWORD" -a -z "$MYSQL_ALLOW_EMPTY_PASSWORD" ]; then @@ -95,7 +95,7 @@ EOSQL chown -R mysql:mysql "$DATADIR" fi -# if cluster is turned on, then procede to build cluster setting strings +# if cluster is turned on, then proceed to build cluster setting strings # that will be interpolated into the config files if [ -n "$GALERA_CLUSTER" ]; then # this is the Single State Transfer user (SST, initial dump or xtrabackup user) diff --git a/examples/selenium/README.md b/examples/selenium/README.md index be273483c3f..5a643a3e0d1 100644 --- a/examples/selenium/README.md +++ b/examples/selenium/README.md @@ -77,7 +77,7 @@ export PODNAME=`kubectl get pods --selector="app=selenium-hub" --output=template kubectl port-forward --pod=$PODNAME 4444:4444 ``` -In a seperate terminal, you can now check the status. +In a separate terminal, you can now check the status. 
```console curl http://localhost:4444 diff --git a/hack/gen-swagger-doc/example-output/definitions.html b/hack/gen-swagger-doc/example-output/definitions.html index 09691c8c0ac..d6f1061854a 100644 --- a/hack/gen-swagger-doc/example-output/definitions.html +++ b/hack/gen-swagger-doc/example-output/definitions.html @@ -3364,7 +3364,7 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; } - + diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh index e0d60b77be4..cd695bc47ef 100755 --- a/hack/jenkins/e2e.sh +++ b/hack/jenkins/e2e.sh @@ -289,7 +289,7 @@ case ${JOB_NAME} in export NUM_NODES="100" # Reduce logs verbosity export TEST_CLUSTER_LOG_LEVEL="--v=2" - # TODO: Remove when we figure out the reason for ocassional failures #19048 + # TODO: Remove when we figure out the reason for occasional failures #19048 export KUBELET_TEST_LOG_LEVEL="--v=4" # Increase resync period to simulate production export TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" @@ -366,7 +366,7 @@ case ${JOB_NAME} in export E2E_TEST="false" export USE_KUBEMARK="true" export KUBEMARK_TESTS="\[Feature:Performance\]" - # Override defaults to be indpendent from GCE defaults and set kubemark parameters + # Override defaults to be independent from GCE defaults and set kubemark parameters export NUM_NODES="10" export MASTER_SIZE="n1-standard-2" export NODE_SIZE="n1-standard-1" @@ -385,7 +385,7 @@ case ${JOB_NAME} in export USE_KUBEMARK="true" export KUBEMARK_TESTS="\[Feature:Performance\]" export FAIL_ON_GCP_RESOURCE_LEAK="false" - # Override defaults to be indpendent from GCE defaults and set kubemark parameters + # Override defaults to be independent from GCE defaults and set kubemark parameters export NUM_NODES="6" export MASTER_SIZE="n1-standard-4" export NODE_SIZE="n1-standard-8" @@ -403,7 +403,7 @@ case ${JOB_NAME} in export USE_KUBEMARK="true" export KUBEMARK_TESTS="\[Feature:Performance\]" export FAIL_ON_GCP_RESOURCE_LEAK="false" - # Override defaults to be indpendent from GCE defaults and set kubemark parameters + # Override defaults to be independent from GCE defaults and set kubemark parameters # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way. export NUM_NODES="11" export MASTER_SIZE="n1-standard-4" diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh index 7464683b4d1..0eef0ca6d63 100755 --- a/hack/test-cmd.sh +++ b/hack/test-cmd.sh @@ -437,7 +437,7 @@ runTests() { kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:' # Command kubectl label pods valid-pod new-name=new-valid-pod "${kube_flags[@]}" - # Post-conditon: valid-pod is labelled + # Post-condition: valid-pod is labelled kube::test::get_object_assert 'pod valid-pod' "{{range$labels_field}}{{.}}:{{end}}" 'valid-pod:new-valid-pod:' ### Delete POD by label diff --git a/hack/update-codecgen.sh b/hack/update-codecgen.sh index de766f7f728..37ecc41476f 100755 --- a/hack/update-codecgen.sh +++ b/hack/update-codecgen.sh @@ -112,11 +112,11 @@ for current in "${index[@]}"; do pushd "$(dirname ${file})" > /dev/null base_file=$(basename "${file}") base_generated_file=$(basename "${generated_file}") - # We use '-d 1234' flag to have a deterministic output everytime. + # We use '-d 1234' flag to have a deterministic output every time. # The constant was just randomly chosen. 
echo Running ${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}" ${CODECGEN} -d 1234 -o "${base_generated_file}" "${base_file}" - # Add boilerplate at the begining of the generated file. + # Add boilerplate at the beginning of the generated file. sed 's/YEAR/2015/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp" cat "${base_generated_file}" >> "${base_generated_file}.tmp" mv "${base_generated_file}.tmp" "${base_generated_file}" diff --git a/hack/verify-api-reference-docs.sh b/hack/verify-api-reference-docs.sh index 456c31108cb..42bc9659e07 100755 --- a/hack/verify-api-reference-docs.sh +++ b/hack/verify-api-reference-docs.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Verifies that api reference docs are upto date. +# Verifies that api reference docs are up to date. set -o errexit set -o nounset diff --git a/hack/verify-codecgen.sh b/hack/verify-codecgen.sh index a3746d62c05..e9cbf4c8cf3 100755 --- a/hack/verify-codecgen.sh +++ b/hack/verify-codecgen.sh @@ -97,10 +97,10 @@ for current in ${index[@]}; do base_generated_file=$(basename "${generated_file}") # temporarily move the generated file to a non-go file so it doesn't influence the verify codecgen mv "${base_generated_file}" "${base_generated_file}.bak" - # We use '-d 1234' flag to have a deterministic output everytime. + # We use '-d 1234' flag to have a deterministic output every time. # The constant was just randomly chosen. ${CODECGEN} -d 1234 -o "${base_generated_file}.1tmp" "${base_file}" - # Add boilerplate at the begining of the generated file. + # Add boilerplate at the beginning of the generated file. sed 's/YEAR/2015/' "${initial_dir}/hack/boilerplate/boilerplate.go.txt" > "${base_generated_file}.tmp" cat "${base_generated_file}.1tmp" >> "${base_generated_file}.tmp" rm "${base_generated_file}.1tmp" diff --git a/hack/verify-godeps.sh b/hack/verify-godeps.sh index b98897c3d36..dcec60fc1ae 100755 --- a/hack/verify-godeps.sh +++ b/hack/verify-godeps.sh @@ -93,7 +93,7 @@ fi # is an intentionally broken symlink. Linux can use --no-dereference. OS X cannot. # So we --exclude='symlink' so diff -r doesn't die following a bad symlink. if ! _out="$(diff -Naupr --exclude='symlink' ${KUBE_ROOT}/Godeps/_workspace/src ${_kubetmp}/Godeps/_workspace/src)"; then - echo "Your godeps changes are not reproducable" + echo "Your godeps changes are not reproducible" echo "${_out}" exit 1 fi diff --git a/hooks/pre-commit b/hooks/pre-commit index d4234f78f75..d2e19f99460 100755 --- a/hooks/pre-commit +++ b/hooks/pre-commit @@ -16,10 +16,10 @@ else fi echo "${reset}" -# Check if changes to Godeps are reproducable... +# Check if changes to Godeps are reproducible... files=($(git diff --cached --name-only --diff-filter ACM | grep "Godeps")) if [[ "${#files[@]}" -ne 0 ]]; then - echo -ne "Check if changes to Godeps are reproducable (this is pretty slow)..." + echo -ne "Check if changes to Godeps are reproducible (this is pretty slow)..." if ! OUT=$("hack/verify-godeps.sh" 2>&1); then echo echo "${red}${OUT}" diff --git a/pkg/api/rest/rest.go b/pkg/api/rest/rest.go index 584670f7789..d07023dad5d 100644 --- a/pkg/api/rest/rest.go +++ b/pkg/api/rest/rest.go @@ -104,7 +104,7 @@ type GetterWithOptions interface { // value of the request path below the object will be included as the named // string in the serialization of the runtime object. 
E.g., returning "path" // will convert the trailing request scheme value to "path" in the map[string][]string - // passed to the convertor. + // passed to the converter. NewGetOptions() (runtime.Object, bool, string) } diff --git a/pkg/api/types.go b/pkg/api/types.go index 964280317bf..7a586bd7180 100644 --- a/pkg/api/types.go +++ b/pkg/api/types.go @@ -429,7 +429,7 @@ const ( StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) ) -// Protocol defines network protocols supported for things like conatiner ports. +// Protocol defines network protocols supported for things like container ports. type Protocol string const ( @@ -1017,7 +1017,7 @@ type ContainerStatus struct { Name string `json:"name"` State ContainerState `json:"state,omitempty"` LastTerminationState ContainerState `json:"lastState,omitempty"` - // Ready specifies whether the conatiner has passed its readiness check. + // Ready specifies whether the container has passed its readiness check. Ready bool `json:"ready"` // Note that this is calculated from dead containers. But those containers are subject to // garbage collection. This value will get capped at 5 by GC. @@ -1934,7 +1934,7 @@ type PodLogOptions struct { // Only one of sinceSeconds or sinceTime may be specified. SinceSeconds *int64 // An RFC3339 timestamp from which to show logs. If this value - // preceeds the time a pod was started, only logs since the pod start will be returned. + // precedes the time a pod was started, only logs since the pod start will be returned. // If this value is in the future, no logs will be returned. // Only one of sinceSeconds or sinceTime may be specified. SinceTime *unversioned.Time diff --git a/pkg/api/v1/conversion.go b/pkg/api/v1/conversion.go index 0adf467f9af..56ca88f3677 100644 --- a/pkg/api/v1/conversion.go +++ b/pkg/api/v1/conversion.go @@ -322,7 +322,7 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi return err } - // the host namespace fields have to be handled here for backward compatibilty + // the host namespace fields have to be handled here for backward compatibility // with v1.0.0 out.HostPID = in.SecurityContext.HostPID out.HostNetwork = in.SecurityContext.HostNetwork diff --git a/pkg/api/v1/types.go b/pkg/api/v1/types.go index d45f9a1837c..97111982321 100644 --- a/pkg/api/v1/types.go +++ b/pkg/api/v1/types.go @@ -630,7 +630,7 @@ const ( StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) ) -// Protocol defines network protocols supported for things like conatiner ports. +// Protocol defines network protocols supported for things like container ports. type Protocol string const ( @@ -1831,7 +1831,7 @@ type ServiceSpec struct { LoadBalancerIP string `json:"loadBalancerIP,omitempty"` } -// ServicePort conatins information on service's port. +// ServicePort contains information on service's port. type ServicePort struct { // The name of this port within the service. This must be a DNS_LABEL. // All ports within a ServiceSpec must have unique names. This maps to @@ -2346,7 +2346,7 @@ type PodLogOptions struct { // Only one of sinceSeconds or sinceTime may be specified. SinceSeconds *int64 `json:"sinceSeconds,omitempty"` // An RFC3339 timestamp from which to show logs. If this value - // preceeds the time a pod was started, only logs since the pod start will be returned. + // precedes the time a pod was started, only logs since the pod start will be returned. // If this value is in the future, no logs will be returned. 
// Only one of sinceSeconds or sinceTime may be specified. SinceTime *unversioned.Time `json:"sinceTime,omitempty"` diff --git a/pkg/api/v1/types_swagger_doc_generated.go b/pkg/api/v1/types_swagger_doc_generated.go index 8521a3aa065..40e63b5dae1 100644 --- a/pkg/api/v1/types_swagger_doc_generated.go +++ b/pkg/api/v1/types_swagger_doc_generated.go @@ -1126,7 +1126,7 @@ var map_PodLogOptions = map[string]string{ "follow": "Follow the log stream of the pod. Defaults to false.", "previous": "Return previous terminated container logs. Defaults to false.", "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "sinceTime": "An RFC3339 timestamp from which to show logs. If this value preceeds the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", @@ -1496,7 +1496,7 @@ func (ServiceList) SwaggerDoc() map[string]string { } var map_ServicePort = map[string]string{ - "": "ServicePort conatins information on service's port.", + "": "ServicePort contains information on service's port.", "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", "protocol": "The IP protocol for this port. Supports \"TCP\" and \"UDP\". Default is TCP.", "port": "The port that will be exposed by this service.", diff --git a/pkg/apiserver/api_installer.go b/pkg/apiserver/api_installer.go index 8d55f1deba4..bc497ca2ca6 100644 --- a/pkg/apiserver/api_installer.go +++ b/pkg/apiserver/api_installer.go @@ -100,7 +100,7 @@ func (a *APIInstaller) NewWebService() *restful.WebService { ws.Path(a.prefix) // a.prefix contains "prefix/group/version" ws.Doc("API at " + a.prefix) - // Backwards compatibilty, we accepted objects with empty content-type at V1. + // Backwards compatibility, we accepted objects with empty content-type at V1. 
// If we stop using go-restful, we can default empty content-type to application/json on an // endpoint by endpoint basis ws.Consumes("*/*") diff --git a/pkg/apiserver/handlers_test.go b/pkg/apiserver/handlers_test.go index 604e95bdd89..6619db1dd72 100644 --- a/pkg/apiserver/handlers_test.go +++ b/pkg/apiserver/handlers_test.go @@ -66,7 +66,7 @@ func pathWithPrefix(prefix, resource, namespace, name string) string { // hanging for the long time, // - "short" requests are correctly accounted, i.e. there can be only size of channel passed to the // constructor in flight at any given moment, -// - subsequent "short" requests are rejected instantly with apropriate error, +// - subsequent "short" requests are rejected instantly with appropriate error, // - subsequent "long" requests are handled normally, // - we correctly recover after some "short" requests finish, i.e. we can process new ones. func TestMaxInFlight(t *testing.T) { diff --git a/pkg/client/cache/reflector.go b/pkg/client/cache/reflector.go index fa761d5c593..31b076b43e7 100644 --- a/pkg/client/cache/reflector.go +++ b/pkg/client/cache/reflector.go @@ -83,7 +83,7 @@ var ( // TCP connection. minWatchTimeout = 5 * time.Minute // If we are within 'forceResyncThreshold' from the next planned resync - // and are just before issueing Watch(), resync will be forced now. + // and are just before issuing Watch(), resync will be forced now. forceResyncThreshold = 3 * time.Second // We try to set timeouts for Watch() so that we will finish about // than 'timeoutThreshold' from next planned periodic resync. diff --git a/pkg/client/leaderelection/leaderelection.go b/pkg/client/leaderelection/leaderelection.go index e7a3dac8df3..76776c74d49 100644 --- a/pkg/client/leaderelection/leaderelection.go +++ b/pkg/client/leaderelection/leaderelection.go @@ -37,7 +37,7 @@ limitations under the License. // availability. // // Larger clusters often have a more lenient SLA for API latency. This should be -// taken into account when configuring the client. The rate of leader transistions +// taken into account when configuring the client. The rate of leader transitions // should be monitored and RetryPeriod and LeaseDuration should be increased // until the rate is stable and acceptably low. It's important to keep in mind // when configuring this client that the tolerance to API latency varies inversely @@ -149,7 +149,7 @@ type LeaderElector struct { observedRecord LeaderElectionRecord observedTime time.Time // used to implement OnNewLeader(), may lag slightly from the - // value observedRecord.HolderIdentity if the transistion has + // value observedRecord.HolderIdentity if the transition has // not yet been reported. reportedLeader string } diff --git a/pkg/client/unversioned/discovery_client.go b/pkg/client/unversioned/discovery_client.go index 890f376842c..f43d728fbd5 100644 --- a/pkg/client/unversioned/discovery_client.go +++ b/pkg/client/unversioned/discovery_client.go @@ -67,7 +67,7 @@ type SwaggerSchemaInterface interface { SwaggerSchema(version unversioned.GroupVersion) (*swagger.ApiDeclaration, error) } -// DiscoveryClient implements the functions that dicovery server-supported API groups, +// DiscoveryClient implements the functions that discover server-supported API groups, // versions and resources. 
type DiscoveryClient struct { *RESTClient diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index 1dacf987f17..02ef3f30d1a 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -342,7 +342,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) { } // delete terminating pods that have been scheduled on - // nonexistant nodes + // nonexistent nodes if !found { nc.forcefullyDeletePod(pod) return diff --git a/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go b/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go index 9e6b372f30c..93e50c2e81e 100644 --- a/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go +++ b/pkg/controller/persistentvolume/persistentvolume_recycler_controller_test.go @@ -227,7 +227,7 @@ func testRecycleFailures(t *testing.T, recycler *PersistentVolumeRecycler, mockC func newFailingMockRecycler(spec *volume.Spec, host volume.VolumeHost, config volume.VolumeConfig) (volume.Recycler, error) { return &failingMockRecycler{ path: spec.PersistentVolume.Spec.HostPath.Path, - errorCount: myMaximumRetry - 1, // fail two times and then successfuly recycle the volume + errorCount: myMaximumRetry - 1, // fail two times and then successfully recycle the volume }, nil } diff --git a/pkg/kubectl/cmd/util/helpers.go b/pkg/kubectl/cmd/util/helpers.go index 6b2ef69dc20..b1b1ac03ce7 100644 --- a/pkg/kubectl/cmd/util/helpers.go +++ b/pkg/kubectl/cmd/util/helpers.go @@ -350,7 +350,7 @@ func ReadConfigDataFromReader(reader io.Reader, source string) ([]byte, error) { return data, nil } -// ReadConfigData reads the bytes from the specified filesytem or network +// ReadConfigData reads the bytes from the specified filesystem or network // location or from stdin if location == "-". // TODO: replace with resource.Builder func ReadConfigData(location string) ([]byte, error) { diff --git a/pkg/kubectl/describe.go b/pkg/kubectl/describe.go index 72a60e72ef8..fe51062862e 100644 --- a/pkg/kubectl/describe.go +++ b/pkg/kubectl/describe.go @@ -45,7 +45,7 @@ import ( ) // Describer generates output for the named resource or an error -// if the output could not be generated. Implementors typically +// if the output could not be generated. Implementers typically // abstract the retrieval of the named object from a remote server. type Describer interface { Describe(namespace, name string) (output string, err error) @@ -53,7 +53,7 @@ type Describer interface { // ObjectDescriber is an interface for displaying arbitrary objects with extra // information. Use when an object is in hand (on disk, or already retrieved). -// Implementors may ignore the additional information passed on extra, or use it +// Implementers may ignore the additional information passed on extra, or use it // by default. ObjectDescribers may return ErrNoDescriber if no suitable describer // is found. type ObjectDescriber interface { diff --git a/pkg/kubelet/config/config_test.go b/pkg/kubelet/config/config_test.go index 76ef59f024b..0c148e32124 100644 --- a/pkg/kubelet/config/config_test.go +++ b/pkg/kubelet/config/config_test.go @@ -108,7 +108,7 @@ func expectPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate, expected ...ku if len(expected[i].Pods) != len(update.Pods) { t.Fatalf("Expected %#v, Got %#v", expected[i], update) } - // Compare pods one by one. This is necessary beacuse we don't want to + // Compare pods one by one. 
This is necessary because we don't want to // compare local annotations. for j := range expected[i].Pods { if podsDifferSemantically(expected[i].Pods[j], update.Pods[j]) || !reflect.DeepEqual(expected[i].Pods[j].Status, update.Pods[j].Status) { diff --git a/pkg/kubelet/container/cache.go b/pkg/kubelet/container/cache.go index 55722ec5799..219ad49f397 100644 --- a/pkg/kubelet/container/cache.go +++ b/pkg/kubelet/container/cache.go @@ -189,7 +189,7 @@ func (c *cache) subscribe(id types.UID, timestamp time.Time) chan *data { defer c.lock.Unlock() d := c.getIfNewerThan(id, timestamp) if d != nil { - // If the cache entry is ready, send the data and return immediatly. + // If the cache entry is ready, send the data and return immediately. ch <- d return ch } diff --git a/pkg/kubelet/container/sync_result.go b/pkg/kubelet/container/sync_result.go index 0faf701d7ae..1c3aa9eea90 100644 --- a/pkg/kubelet/container/sync_result.go +++ b/pkg/kubelet/container/sync_result.go @@ -114,7 +114,7 @@ func (p *PodSyncResult) AddPodSyncResult(result PodSyncResult) { p.SyncError = result.SyncError } -// Fail fails the PodSyncResult with an error occured in SyncPod() and KillPod() itself +// Fail fails the PodSyncResult with an error occurred in SyncPod() and KillPod() itself func (p *PodSyncResult) Fail(err error) { p.SyncError = err } diff --git a/pkg/kubelet/custommetrics/custom_metrics.go b/pkg/kubelet/custommetrics/custom_metrics.go index 4a01211a178..05a628a440b 100644 --- a/pkg/kubelet/custommetrics/custom_metrics.go +++ b/pkg/kubelet/custommetrics/custom_metrics.go @@ -37,7 +37,7 @@ func GetCAdvisorCustomMetricsDefinitionPath(container *api.Container) (*string, if container.VolumeMounts != nil { for _, volumeMount := range container.VolumeMounts { if path.Clean(volumeMount.MountPath) == path.Clean(CustomMetricsDefinitionDir) { - // TODO: add defintion file validation. + // TODO: add definition file validation. 
definitionPath := path.Clean(path.Join(volumeMount.MountPath, CustomMetricsDefinitionContainerFile)) return &definitionPath, nil } diff --git a/pkg/kubelet/disk_manager_test.go b/pkg/kubelet/disk_manager_test.go index 9f19906793b..e44d5297a38 100644 --- a/pkg/kubelet/disk_manager_test.go +++ b/pkg/kubelet/disk_manager_test.go @@ -196,7 +196,7 @@ func TestCache(t *testing.T) { assert.NoError(err) assert.True(ok) - // Ensure no more calls to the mockCadvisor occured + // Ensure no more calls to the mockCadvisor occurred assert.Equal(cadvisorCallCount, len(mockCadvisor.Calls)) } diff --git a/pkg/kubelet/dockertools/docker_test.go b/pkg/kubelet/dockertools/docker_test.go index 3edf47ab8ad..bcfef0d5bf4 100644 --- a/pkg/kubelet/dockertools/docker_test.go +++ b/pkg/kubelet/dockertools/docker_test.go @@ -704,7 +704,7 @@ func TestFindContainersByPod(t *testing.T) { } fakeClient := &FakeDockerClient{} np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil)) - // image back-off is set to nil, this test shouldnt pull images + // image back-off is set to nil, this test should not pull images containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, kubetypes.PodInfraContainerImage, 0, 0, "", kubecontainer.FakeOS{}, np, nil, nil, nil) for i, test := range tests { fakeClient.ContainerList = test.containerList diff --git a/pkg/kubelet/dockertools/fake_docker_client.go b/pkg/kubelet/dockertools/fake_docker_client.go index 7aa1545df3e..00d7be003bf 100644 --- a/pkg/kubelet/dockertools/fake_docker_client.go +++ b/pkg/kubelet/dockertools/fake_docker_client.go @@ -189,7 +189,7 @@ func (f *FakeDockerClient) ListContainers(options docker.ListContainersOptions) err := f.popError("list") containerList := append([]docker.APIContainers{}, f.ContainerList...) if options.All { - // Althought the container is not sorted, but the container with the same name should be in order, + // Although the container is not sorted, but the container with the same name should be in order, // that is enough for us now. // TODO(random-liu): Is a fully sorted array needed? containerList = append(containerList, f.ExitedContainerList...) diff --git a/pkg/kubelet/dockertools/manager.go b/pkg/kubelet/dockertools/manager.go index 38d30f511fd..7bd47ce82bf 100644 --- a/pkg/kubelet/dockertools/manager.go +++ b/pkg/kubelet/dockertools/manager.go @@ -1570,7 +1570,7 @@ func (dm *DockerManager) createPodInfraContainer(pod *api.Pod) (kubecontainer.Do } // No pod secrets for the infra container. - // The message isnt needed for the Infra container + // The message isn't needed for the Infra container if err, msg := dm.imagePuller.PullImage(pod, container, nil); err != nil { return "", err, msg } diff --git a/pkg/kubelet/dockertools/manager_test.go b/pkg/kubelet/dockertools/manager_test.go index afed48f834c..4165b9060be 100644 --- a/pkg/kubelet/dockertools/manager_test.go +++ b/pkg/kubelet/dockertools/manager_test.go @@ -913,7 +913,7 @@ func TestSyncPodsDoesNothing(t *testing.T) { runSyncPod(t, dm, fakeDocker, pod, nil, false) verifyCalls(t, fakeDocker, []string{ - // Check the pod infra contianer. + // Check the pod infra container. "inspect_container", }) } diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index c4ab7a9cc0a..40fc96a3528 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -113,7 +113,7 @@ const ( etcHostsPath = "/etc/hosts" - // Capacity of the channel for recieving pod lifecycle events. 
This number + // Capacity of the channel for receiving pod lifecycle events. This number // is a bit arbitrary and may be adjusted in the future. plegChannelCapacity = 1000 @@ -1579,7 +1579,7 @@ func parseResolvConf(reader io.Reader, dnsScrubber dnsScrubber) (nameservers []s } // One of the following aruguements must be non-nil: runningPod, status. -// TODO: Modify containerRuntime.KillPod() to accept the right arguements. +// TODO: Modify containerRuntime.KillPod() to accept the right arguments. func (kl *Kubelet) killPod(pod *api.Pod, runningPod *kubecontainer.Pod, status *kubecontainer.PodStatus) error { var p kubecontainer.Pod if runningPod != nil { @@ -2114,7 +2114,7 @@ func (kl *Kubelet) HandlePodCleanups() error { glog.Errorf("Failed to cleanup terminated pods: %v", err) } - // Clear out any old bandwith rules + // Clear out any old bandwidth rules if err = kl.cleanupBandwidthLimits(allPods); err != nil { return err } diff --git a/pkg/kubelet/pleg/generic.go b/pkg/kubelet/pleg/generic.go index c603659ca33..cd5a0348907 100644 --- a/pkg/kubelet/pleg/generic.go +++ b/pkg/kubelet/pleg/generic.go @@ -101,7 +101,7 @@ func NewGenericPLEG(runtime kubecontainer.Runtime, channelCapacity int, } } -// Returns a channel from which the subscriber can recieve PodLifecycleEvent +// Returns a channel from which the subscriber can receive PodLifecycleEvent // events. // TODO: support multiple subscribers. func (g *GenericPLEG) Watch() chan *PodLifecycleEvent { diff --git a/pkg/kubelet/rkt/rkt_test.go b/pkg/kubelet/rkt/rkt_test.go index bbb655ee385..7c4b3b5c733 100644 --- a/pkg/kubelet/rkt/rkt_test.go +++ b/pkg/kubelet/rkt/rkt_test.go @@ -614,7 +614,7 @@ func TestGetPodStatus(t *testing.T) { Name: "guestbook", Namespace: "default", IP: "10.10.10.42", - // Result should contain all contianers. + // Result should contain all containers. ContainerStatuses: []*kubecontainer.ContainerStatus{ { ID: kubecontainer.BuildContainerID("rkt", "uuid-4002:app-1"), diff --git a/pkg/kubelet/util/queue/work_queue.go b/pkg/kubelet/util/queue/work_queue.go index 6870c18ace2..c54d93e627c 100644 --- a/pkg/kubelet/util/queue/work_queue.go +++ b/pkg/kubelet/util/queue/work_queue.go @@ -24,7 +24,7 @@ import ( "k8s.io/kubernetes/pkg/util" ) -// WorkQueue allows queueing items with a timestamp. An item is +// WorkQueue allows queuing items with a timestamp. An item is // considered ready to process if the timestamp has expired. type WorkQueue interface { // GetWork dequeues and returns all ready items. diff --git a/pkg/master/master.go b/pkg/master/master.go index 5abfbd20237..bda90e92b02 100644 --- a/pkg/master/master.go +++ b/pkg/master/master.go @@ -216,7 +216,7 @@ func (m *Master) InstallAPIs(c *Config) { extensionResources := m.getExtensionResources(c) extensionsGroupMeta := registered.GroupOrDie(extensions.GroupName) - // Update the prefered version as per StorageVersions in the config. + // Update the preferred version as per StorageVersions in the config. storageVersion, found := c.StorageVersions[extensionsGroupMeta.GroupVersion.Group] if !found { glog.Fatalf("Couldn't find storage version of group %v", extensionsGroupMeta.GroupVersion.Group) diff --git a/pkg/runtime/scheme.go b/pkg/runtime/scheme.go index 37bd985aad4..ac6cce3322d 100644 --- a/pkg/runtime/scheme.go +++ b/pkg/runtime/scheme.go @@ -142,7 +142,7 @@ func (s *Scheme) Converter() *conversion.Converter { // API group and version that would never be updated. 
// // TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into -// every version with particular schemas. Resolve tihs method at that point. +// every version with particular schemas. Resolve this method at that point. func (s *Scheme) AddUnversionedTypes(version unversioned.GroupVersion, types ...Object) { s.AddKnownTypes(version, types...) for _, obj := range types { diff --git a/pkg/runtime/types.go b/pkg/runtime/types.go index 3b8cede446a..470f32ff402 100644 --- a/pkg/runtime/types.go +++ b/pkg/runtime/types.go @@ -118,7 +118,7 @@ type Unstructured struct { // of an object during the decoding process. type VersionedObjects struct { // Objects is the set of objects retrieved during decoding, in order of conversion. - // The 0 index is the object as serialized on the wire. If conversion has occured, + // The 0 index is the object as serialized on the wire. If conversion has occurred, // other objects may be present. The right most object is the same as would be returned // by a normal Decode call. Objects []Object diff --git a/pkg/util/backoff.go b/pkg/util/backoff.go index 0ac526ec953..1426590ac59 100644 --- a/pkg/util/backoff.go +++ b/pkg/util/backoff.go @@ -127,7 +127,7 @@ func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { return entry } -// After 2*maxDuration we restart the backoff factor to the begining +// After 2*maxDuration we restart the backoff factor to the beginning func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration } diff --git a/pkg/util/bandwidth/linux.go b/pkg/util/bandwidth/linux.go index b5aeaa71487..edb480c6977 100644 --- a/pkg/util/bandwidth/linux.go +++ b/pkg/util/bandwidth/linux.go @@ -37,8 +37,8 @@ import ( // In general, using this requires that the caller posses the NET_CAP_ADMIN capability, though if you // do this within an container, it only requires the NS_CAPABLE capability for manipulations to that // container's network namespace. -// Uses the hierarchical token bucket queueing discipline (htb), this requires Linux 2.4.20 or newer -// or a custom kernel with that queueing discipline backported. +// Uses the hierarchical token bucket queuing discipline (htb), this requires Linux 2.4.20 or newer +// or a custom kernel with that queuing discipline backported. type tcShaper struct { e exec.Interface iface string diff --git a/pkg/util/deployment/deployment.go b/pkg/util/deployment/deployment.go index b02dcab1986..8bac6da0e42 100644 --- a/pkg/util/deployment/deployment.go +++ b/pkg/util/deployment/deployment.go @@ -108,7 +108,7 @@ func GetOldReplicaSetsFromLists(deployment extensions.Deployment, c clientset.In } // GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface. -// Returns nil if the new replica set doesnt exist yet. +// Returns nil if the new replica set doesn't exist yet. func GetNewReplicaSet(deployment extensions.Deployment, c clientset.Interface) (*extensions.ReplicaSet, error) { return GetNewReplicaSetFromList(deployment, c, func(namespace string, options api.ListOptions) ([]extensions.ReplicaSet, error) { @@ -118,7 +118,7 @@ func GetNewReplicaSet(deployment extensions.Deployment, c clientset.Interface) ( } // GetNewReplicaSetFromList returns a replica set that matches the intent of the given deployment; get ReplicaSetList with the input function. 
-// Returns nil if the new replica set doesnt exist yet. +// Returns nil if the new replica set doesn't exist yet. func GetNewReplicaSetFromList(deployment extensions.Deployment, c clientset.Interface, getRSList func(string, api.ListOptions) ([]extensions.ReplicaSet, error)) (*extensions.ReplicaSet, error) { namespace := deployment.ObjectMeta.Namespace selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) diff --git a/pkg/util/keymutex/keymutex.go b/pkg/util/keymutex/keymutex.go index 7e644c143b6..2bcc9517592 100644 --- a/pkg/util/keymutex/keymutex.go +++ b/pkg/util/keymutex/keymutex.go @@ -24,7 +24,7 @@ import ( // KeyMutex is a thread-safe interface for acquiring locks on arbitrary strings. type KeyMutex interface { - // Aquires a lock associated with the specified ID, creates the lock if one doesn't already exist. + // Acquires a lock associated with the specified ID, creates the lock if one doesn't already exist. LockKey(id string) // Releases the lock associated with the specified ID. @@ -44,7 +44,7 @@ type keyMutex struct { mutexMap map[string]*sync.Mutex } -// Aquires a lock associated with the specified ID (creates the lock if one doesn't already exist). +// Acquires a lock associated with the specified ID (creates the lock if one doesn't already exist). func (km *keyMutex) LockKey(id string) { glog.V(5).Infof("LockKey(...) called for id %q\r\n", id) mutex := km.getOrCreateLock(id) diff --git a/pkg/util/labels/labels.go b/pkg/util/labels/labels.go index f7f034bc27c..c32b862cd49 100644 --- a/pkg/util/labels/labels.go +++ b/pkg/util/labels/labels.go @@ -26,7 +26,7 @@ import ( // Returns the given map, if labelKey is empty. func CloneAndAddLabel(labels map[string]string, labelKey string, labelValue uint32) map[string]string { if labelKey == "" { - // Dont need to add a label. + // Don't need to add a label. return labels } // Clone. @@ -42,7 +42,7 @@ func CloneAndAddLabel(labels map[string]string, labelKey string, labelValue uint // Returns the given map, if labelKey is empty. func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]string { if labelKey == "" { - // Dont need to add a label. + // Don't need to add a label. return labels } // Clone. @@ -58,7 +58,7 @@ func CloneAndRemoveLabel(labels map[string]string, labelKey string) map[string]s // Returns the given selector, if labelKey is empty. func CloneSelectorAndAddLabel(selector *unversioned.LabelSelector, labelKey string, labelValue uint32) *unversioned.LabelSelector { if labelKey == "" { - // Dont need to add a label. + // Don't need to add a label. return selector } diff --git a/pkg/util/mount/mount_linux.go b/pkg/util/mount/mount_linux.go index c4ff0179974..66959697c57 100644 --- a/pkg/util/mount/mount_linux.go +++ b/pkg/util/mount/mount_linux.go @@ -284,7 +284,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, cmd := mounter.Runner.Command("mkfs."+fstype, args...) _, err := cmd.CombinedOutput() if err == nil { - // the disk has been formatted sucessfully try to mount it again. + // the disk has been formatted successfully try to mount it again. 
return mounter.Interface.Mount(source, target, fstype, options) } return err diff --git a/pkg/util/util.go b/pkg/util/util.go index 97303b76ff0..29284b2060d 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -122,7 +122,7 @@ func IntPtr(i int) *int { return &o } -// IntPtrDerefOr derefrence the int ptr and returns it i not nil, +// IntPtrDerefOr dereferences the int ptr and returns it if not nil, // else returns def. func IntPtrDerefOr(ptr *int, def int) int { if ptr != nil { diff --git a/pkg/volume/flexvolume/flexvolume.go b/pkg/volume/flexvolume/flexvolume.go index 129bbb22447..86dbf3017cf 100644 --- a/pkg/volume/flexvolume/flexvolume.go +++ b/pkg/volume/flexvolume/flexvolume.go @@ -58,7 +58,7 @@ type flexVolumePlugin struct { host volume.VolumeHost } -// Init intializes the plugin. +// Init initializes the plugin. func (plugin *flexVolumePlugin) Init(host volume.VolumeHost) error { plugin.host = host // call the init script diff --git a/pkg/volume/util.go b/pkg/volume/util.go index 8df127cd531..7dc454d3cd1 100644 --- a/pkg/volume/util.go +++ b/pkg/volume/util.go @@ -140,7 +140,7 @@ func CalculateTimeoutForVolume(minimumTimeout, timeoutIncrement int, pv *api.Per } } -// RoundUpSize calculates how many allocation units are needed to accomodate +// RoundUpSize calculates how many allocation units are needed to accommodate // a volume of given size. E.g. when user wants 1500MiB volume, while AWS EBS // allocates volumes in gibibyte-sized chunks, // RoundUpSize(1500 * 1024*1024, 1024*1024*1024) returns '2' diff --git a/plugin/pkg/admission/namespace/lifecycle/admission_test.go b/plugin/pkg/admission/namespace/lifecycle/admission_test.go index 524694516a9..35647310331 100644 --- a/plugin/pkg/admission/namespace/lifecycle/admission_test.go +++ b/plugin/pkg/admission/namespace/lifecycle/admission_test.go @@ -118,7 +118,7 @@ func TestAdmission(t *testing.T) { t.Errorf("Did not expect an error %v", err) } - // verify create/update/delete of object in non-existant namespace throws error + // verify create/update/delete of object in non-existent namespace throws error err = handler.Admit(admission.NewAttributesRecord(&badPod, api.Kind("Pod"), badPod.Namespace, badPod.Name, api.Resource("pods"), "", admission.Create, nil)) if err == nil { t.Errorf("Expected an aerror that objects cannot be created in non-existant namespaces", err) diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 1959e1e794d..c7c349598fe 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ -31,7 +31,7 @@ import ( "github.com/golang/glog" ) -// Amazon reccomends having no more that 40 volumes attached to an instance, +// Amazon recommends having no more than 40 volumes attached to an instance, // and at least one of those is for the system root volume. const DefaultMaxEBSVolumes = 39 diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 626d140cdbb..eb8db985c65 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -51,7 +51,7 @@ type Framework struct { NamespaceDeletionTimeout time.Duration gatherer containerResourceGatherer - // Constraints that passed to a check which is exectued after data is gathered to + // Constraints that passed to a check which is executed after data is gathered to // see if 99% of results are within acceptable bounds. It as to be injected in the test, // as expectations vary greatly.
Constraints are groupped by the container names. addonResourceConstraints map[string]resourceConstraint diff --git a/test/e2e/kubelet_stats.go b/test/e2e/kubelet_stats.go index dffdb2591eb..63b991c811a 100644 --- a/test/e2e/kubelet_stats.go +++ b/test/e2e/kubelet_stats.go @@ -247,7 +247,7 @@ type resourceUsagePerNode map[string]resourceUsagePerContainer // write the actual interval used for calculation (based on the timestamps of // the stats points in containerResourceUsage.CPUInterval. // -// containerNames is a function returning a collection of contianer names in which +// containerNames is a function returning a collection of container names in which // user is interested in. ExpectMissingContainers is a flag which says if the test // should fail if one of containers listed by containerNames is missing on any node // (useful e.g. when looking for system containers or daemons). If set to true function diff --git a/test/e2e/util.go b/test/e2e/util.go index 5391c0131f5..9c934e20761 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -2079,7 +2079,7 @@ func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, d return false, err } if newRS == nil { - // New RC hasnt been created yet. + // New RC hasn't been created yet. return false, nil } allRSs := append(oldRSs, newRS)

sinceTime (QueryParameter, string, required: false):
-An RFC3339 timestamp from which to show logs. If this value preceeds the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.
+An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.

template (v1.PodTemplateSpec, required: false):
-object that describes the pod that will be created if insufficient replicas are detected; takes precendence over templateRef; see http://releases.k8s.io/HEAD/docs/replication-controller.md#pod-template
+object that describes the pod that will be created if insufficient replicas are detected; takes precedence over templateRef; see http://releases.k8s.io/HEAD/docs/replication-controller.md#pod-template