This commit is contained in:
Veres Lajos
2015-08-08 22:29:57 +01:00
parent 2bfa9a1f98
commit 9f77e49109
116 changed files with 160 additions and 160 deletions

View File

@@ -25,7 +25,7 @@
# LIMITATIONS
# 1. controllers are not updated unless their name is changed
# 3. Services will not be updated unless their name is changed,
# but for services we acually want updates without name change.
# but for services we actually want updates without name change.
# 4. Json files are not handled at all. Currently addons must be
# in yaml files
# 5. exit code is probably not always correct (I haven't checked

View File

@@ -25,7 +25,7 @@ kube_token_dir: "{{ kube_config_dir }}/tokens"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"
# This is the group that the cert creation scripts chgrp the
# cert files to. Not really changable...
# cert files to. Not really changeable...
kube_cert_group: kube-cert
# Internal DNS domain name.

View File

@@ -182,7 +182,7 @@ func parseTimeISO8601(s string) (time.Time, error) {
theTime.offMinute = v
s = s[2:]
default:
return time.Time{}, errors.New("an unknown error occured")
return time.Time{}, errors.New("an unknown error occurred")
}
state++
}

View File

@@ -1,4 +1,4 @@
# Alpine linux would be great for this, but it's DNS does not use seach paths.
# Alpine linux would be great for this, but its DNS does not use search paths.
FROM progrium/busybox
MAINTAINER Tim Hockin "thockin@google.com"

View File

@@ -34,7 +34,7 @@ In this case, if there are problems launching a replacement scheduler process th
##### Command Line Arguments
- `--ha` is required to enable scheduler HA and multi-scheduler leader election.
- `--km_path` or else (`--executor_path` and `--proxy_path`) should reference non-local-file URI's and must be identicial across schedulers.
- `--km_path` or else (`--executor_path` and `--proxy_path`) should reference non-local-file URIs and must be identical across schedulers.
If you have HDFS installed on your slaves then you can specify HDFS URI locations for the binaries:

View File

@@ -263,7 +263,7 @@ func TestExecutorRegister(t *testing.T) {
}
// TestExecutorDisconnect ensures that the executor thinks that it is not
// connected after a call to Disconnected has occured.
// connected after a call to Disconnected has occurred.
func TestExecutorDisconnect(t *testing.T) {
mockDriver := &MockExecutorDriver{}
executor := NewTestKubernetesExecutor()
@@ -346,7 +346,7 @@ func TestExecutorLaunchAndKillTask(t *testing.T) {
select {
case <-updates:
case <-time.After(time.Second):
t.Fatalf("Executor should send an intial update on Registration")
t.Fatalf("Executor should send an initial update on Registration")
}
pod := NewTestPod(1)

View File

@@ -58,7 +58,7 @@ type FIFO interface {
// Pop waits until an item is ready and returns it. If multiple items are
// ready, they are returned in the order in which they were added/updated.
// The item is removed from the queue (and the store) before it is returned,
// so if you don't succesfully process it, you need to add it back with Add().
// so if you don't successfully process it, you need to add it back with Add().
Pop() interface{}
// Await attempts to Pop within the given interval; upon success the non-nil

View File

@@ -350,7 +350,7 @@ func (k *kubeScheduler) doSchedule(task *podtask.T, err error) (string, error) {
return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer)
}
// write resource limits into the pod spec which is transfered to the executor. From here
// write resource limits into the pod spec which is transferred to the executor. From here
// on we can expect that the pod spec of a task has proper limits for CPU and memory.
// TODO(sttts): For a later separation of the kubelet and the executor also patch the pod on the apiserver
if unlimitedCPU := mresource.LimitPodCPU(&task.Pod, k.defaultContainerCPULimit); unlimitedCPU {

View File

@@ -52,7 +52,7 @@ This is a v1 api based, containerized prometheus ReplicationController, which sc
1. Use kubectl to handle auth & proxy the kubernetes API locally, emulating the old KUBERNETES_RO service.
1. The list of services to be monitored is passed as a command line aguments in
1. The list of services to be monitored is passed as command line arguments in
the yaml file.
1. The startup scripts assumes that each service T will have

View File

@@ -191,7 +191,7 @@ $ mysql -u root -ppassword --host 104.197.63.17 --port 3306 -e 'show databases;'
### Troubleshooting:
- If you can curl or netcat the endpoint from the pod (with kubectl exec) and not from the node, you have not specified hostport and containerport.
- If you can hit the ips from the node but not from your machine outside the cluster, you have not opened firewall rules for the right network.
- If you can't hit the ips from within the container, either haproxy or the service_loadbalacer script is not runing.
- If you can't hit the ips from within the container, either haproxy or the service_loadbalacer script is not running.
1. Use ps in the pod
2. sudo restart haproxy in the pod
3. cat /etc/haproxy/haproxy.cfg in the pod

View File

@@ -141,7 +141,7 @@ func TestGetServices(t *testing.T) {
{Port: 20, TargetPort: util.NewIntOrStringFromInt(ports[1])},
}
// 2 services targetting the same endpoints, one of which is declared as a tcp service.
// 2 services targeting the same endpoints, one of which is declared as a tcp service.
svc1 := getService(servicePorts)
svc2 := getService(servicePorts)
endpoints := []*api.Endpoints{