From a046fa478d33d17657bb3a240c714610c3d80ae4 Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Fri, 10 Jul 2015 11:19:55 +0200 Subject: [PATCH 01/49] modified spark example to use kubectl exec to interact with cluster and create spark driver pod --- examples/spark/README.md | 57 +++++++++++-------------- examples/spark/images/driver/Dockerfile | 4 ++ examples/spark/images/driver/README.md | 0 examples/spark/images/driver/start.sh | 9 ++++ examples/spark/spark-driver.json | 23 ++++++++++ 5 files changed, 61 insertions(+), 32 deletions(-) create mode 100644 examples/spark/images/driver/Dockerfile create mode 100644 examples/spark/images/driver/README.md create mode 100755 examples/spark/images/driver/start.sh create mode 100644 examples/spark/spark-driver.json

diff --git a/examples/spark/README.md b/examples/spark/README.md index c401311269a..d2ac4575cde 100644 --- a/examples/spark/README.md +++ b/examples/spark/README.md @@ -110,44 +110,35 @@ $ kubectl logs spark-master 15/06/26 14:15:55 INFO Master: Registering worker 10.244.1.15:44839 with 1 cores, 2.6 GB RAM 15/06/26 14:15:55 INFO Master: Registering worker 10.244.0.19:60970 with 1 cores, 2.6 GB RAM ``` -## Step Three: Do something with the cluster -Get the address and port of the Master service. +## Step Three: Start your Spark driver to launch jobs on your Spark cluster + +The Spark driver is used to launch jobs into the Spark cluster. You can read more about it in +[Spark architecture](http://spark.apache.org/docs/latest/cluster-overview.html). ```shell -$ kubectl get service spark-master -NAME LABELS SELECTOR IP(S) PORT(S) -spark-master name=spark-master name=spark-master 10.0.204.187 7077/TCP +$ kubectl create -f examples/spark/spark-driver.json +``` +The Spark driver needs the Master service to be running. + +### Check to see if the driver is running

```shell
$ kubectl get pods
NAME READY REASON RESTARTS AGE
[...]
spark-master 1/1 Running 0 14m
spark-driver 1/1 Running 0 10m
```

-SSH to one of your cluster nodes. On GCE/GKE you can either use [Developers Console](https://console.developers.google.com)
(more details [here](https://cloud.google.com/compute/docs/ssh-in-browser))
-or run `gcloud compute ssh ` where the name can be taken from `kubectl get nodes`
-(more details [here](https://cloud.google.com/compute/docs/gcloud-compute/#connecting)).
+## Step Four: Do something with the cluster
+
+Use ```kubectl exec``` to connect to the Spark driver.

```
-$ kubectl get nodes
-NAME LABELS STATUS
-kubernetes-minion-5jvu kubernetes.io/hostname=kubernetes-minion-5jvu Ready
-kubernetes-minion-6fbi kubernetes.io/hostname=kubernetes-minion-6fbi Ready
-kubernetes-minion-8y2v kubernetes.io/hostname=kubernetes-minion-8y2v Ready
-kubernetes-minion-h0tr kubernetes.io/hostname=kubernetes-minion-h0tr Ready
-
-$ gcloud compute ssh kubernetes-minion-5jvu --zone=us-central1-b
-Linux kubernetes-minion-5jvu 3.16.0-0.bpo.4-amd64 #1 SMP Debian 3.16.7-ckt9-3~deb8u1~bpo70+1 (2015-04-27) x86_64
-
-=== GCE Kubernetes node setup complete ===
-
-me@kubernetes-minion-5jvu:~$
-```
-
-Once logged in run spark-base image. Inside of the image there is a script
-that sets up the environment based on the provided IP and port of the Master.
-
-```
-cluster-node $ sudo docker run -it gcr.io/google_containers/spark-base
-root@f12a6fec45ce:/# . 
/setup_client.sh 10.0.204.187 7077
-root@f12a6fec45ce:/# pyspark
+$ kubectl exec spark-driver -it bash
+root@spark-driver:/#
+root@spark-driver:/# pyspark
Python 2.7.9 (default, Mar 1 2015, 12:57:24) [GCC 4.9.2] on linux2 Type "help", "copyright", "credits" or "license" for more information. @@ -166,7 +157,7 @@ SparkContext available as sc, HiveContext available as sqlContext. ``` ## Result -You now have services, replication controllers, and pods for the Spark master and Spark workers. +You now have services, replication controllers, and pods for the Spark master, Spark driver, and Spark workers. You can take this example to the next step and start using the Apache Spark cluster you just created, see [Spark documentation](https://spark.apache.org/documentation.html) for more information. @@ -181,4 +172,6 @@ Make sure the Master Pod is running (use: ```kubectl get pods```). ```kubectl create -f spark-worker-controller.json``` +```kubectl create -f spark-driver.json``` + [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/README.md?pixel)]() diff --git a/examples/spark/images/driver/Dockerfile b/examples/spark/images/driver/Dockerfile new file mode 100644 index 00000000000..cfb1dad7df3 --- /dev/null +++ b/examples/spark/images/driver/Dockerfile @@ -0,0 +1,4 @@ +FROM gcr.io/google_containers/spark-base +ADD start.sh /start.sh +ADD log4j.properties /opt/spark/conf/log4j.properties +CMD ["/start.sh"] diff --git a/examples/spark/images/driver/README.md b/examples/spark/images/driver/README.md new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/spark/images/driver/start.sh b/examples/spark/images/driver/start.sh new file mode 100755 index 00000000000..495194dc38b --- /dev/null +++ b/examples/spark/images/driver/start.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +echo "$SPARK_MASTER_SERVICE_HOST spark-master" >> /etc/hosts +echo "SPARK_LOCAL_HOSTNAME=$(hostname -i)" >> /opt/spark/conf/spark-env.sh +echo "MASTER=spark://spark-master:$SPARK_MASTER_SERVICE_PORT" >> /opt/spark/conf/spark-env.sh + +while true; do +sleep 100 +done diff --git a/examples/spark/spark-driver.json b/examples/spark/spark-driver.json new file mode 100644 index 00000000000..ee695eeabcd --- /dev/null +++ b/examples/spark/spark-driver.json @@ -0,0 +1,23 @@ +{ + "kind": "Pod", + "apiVersion": "v1", + "metadata": { + "name": "spark-driver", + "labels": { + "name": "spark-driver" + } + }, + "spec": { + "containers": [ + { + "name": "spark-driver", + "image": "gurvin/spark-driver", + "resources": { + "limits": { + "cpu": "100m" + } + } + } + ] + }
}
From bd9e09ba004649ef1c8eea455015e51a4fb41481 Mon Sep 17 00:00:00 2001 From: Max Forbes Date: Thu, 23 Jul 2015 19:00:27 -0700 Subject: [PATCH 02/49] Implement 'Nodes Network' test for GKE --- cluster/gke/util.sh | 5 ++--- hack/ginkgo-e2e.sh | 2 +- test/e2e/resize_nodes.go | 18 ++---------------- 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index bf8def55c92..ee55a9dd2ad 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -209,18 +209,17 @@ function get-password() { | grep password | cut -f 4 -d ' ') } -# Detect the instance name and IP for the master +# Detect the IP for the master. Note that on GKE, we don't know the name of the +# master, so KUBE_MASTER is not set. # # Assumed vars: # ZONE # CLUSTER_NAME # Vars set: -# KUBE_MASTER # KUBE_MASTER_IP function detect-master() { echo "... 
in detect-master()" >&2 detect-project >&2 - KUBE_MASTER="k8s-${CLUSTER_NAME}-master" KUBE_MASTER_IP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \ --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \ | grep endpoint | cut -f 2 -d ' ') diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh index d7e1b608f86..ce349e4b751 100755 --- a/hack/ginkgo-e2e.sh +++ b/hack/ginkgo-e2e.sh @@ -89,7 +89,7 @@ fi export PATH=$(dirname "${e2e_test}"):"${PATH}" "${ginkgo}" "${ginkgo_args[@]:+${ginkgo_args[@]}}" "${e2e_test}" -- \ "${auth_config[@]:+${auth_config[@]}}" \ - --host="https://${KUBE_MASTER_IP-}" \ + --host="https://${KUBE_MASTER_IP:-}" \ --provider="${KUBERNETES_PROVIDER}" \ --gce-project="${PROJECT:-}" \ --gce-zone="${ZONE:-}" \ diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index f3488187cdf..2048d7f948f 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -329,22 +329,8 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) } By(fmt.Sprintf("block network traffic from node %s to the master", node.Name)) - - // TODO marekbiskup 2015-06-19 #10085 - // The use of MasterName will cause iptables to do a DNS lookup to - // resolve the name to an IP address, which will slow down the test - // and cause it to fail if DNS is absent or broken. - // Use the IP address instead. - - destination := testContext.CloudConfig.MasterName - if providerIs("aws") { - // This is the (internal) IP address used on AWS for the master - // TODO: Use IP address for all clouds? - // TODO: Avoid hard-coding this - destination = "172.20.0.9" - } - - iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump DROP", destination) + iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump DROP", + strings.TrimPrefix(testContext.Host, "https://")) defer func() { // This code will execute even if setting the iptables rule failed. 
// It is on purpose because we may have an error even if the new rule From 68f0db84cb60bc404b410820ea869b9d086f111d Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Sat, 25 Jul 2015 21:05:45 +0200 Subject: [PATCH 03/49] added indent for sleep --- examples/spark/images/driver/start.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/spark/images/driver/start.sh b/examples/spark/images/driver/start.sh index 495194dc38b..696c4dc644a 100755 --- a/examples/spark/images/driver/start.sh +++ b/examples/spark/images/driver/start.sh @@ -5,5 +5,5 @@ echo "SPARK_LOCAL_HOSTNAME=$(hostname -i)" >> /opt/spark/conf/spark-env.sh echo "MASTER=spark://spark-master:$SPARK_MASTER_SERVICE_PORT" >> /opt/spark/conf/spark-env.sh while true; do -sleep 100 + sleep 100 done From 5f48898498b4b066569bac39833689285d4a6a47 Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Sat, 25 Jul 2015 21:05:45 +0200 Subject: [PATCH 04/49] added indent for sleep --- examples/spark/images/driver/start.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/spark/images/driver/start.sh b/examples/spark/images/driver/start.sh index 495194dc38b..696c4dc644a 100755 --- a/examples/spark/images/driver/start.sh +++ b/examples/spark/images/driver/start.sh @@ -5,5 +5,5 @@ echo "SPARK_LOCAL_HOSTNAME=$(hostname -i)" >> /opt/spark/conf/spark-env.sh echo "MASTER=spark://spark-master:$SPARK_MASTER_SERVICE_PORT" >> /opt/spark/conf/spark-env.sh while true; do -sleep 100 + sleep 100 done From 5599d83685e046d5ea2079f63cd4380e8e7d3565 Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Mon, 27 Jul 2015 23:01:49 +0200 Subject: [PATCH 05/49] added standard copyright --- examples/spark/images/driver/start.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/examples/spark/images/driver/start.sh b/examples/spark/images/driver/start.sh index 696c4dc644a..13be069957f 100755 --- a/examples/spark/images/driver/start.sh +++ b/examples/spark/images/driver/start.sh @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ echo "$SPARK_MASTER_SERVICE_HOST spark-master" >> /etc/hosts echo "SPARK_LOCAL_HOSTNAME=$(hostname -i)" >> /opt/spark/conf/spark-env.sh echo "MASTER=spark://spark-master:$SPARK_MASTER_SERVICE_PORT" >> /opt/spark/conf/spark-env.sh From b37e7bbd7c9842223b751d9cfdff449727f6d2d9 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Mon, 27 Jul 2015 16:26:42 -0700 Subject: [PATCH 06/49] fix incorrect maintainer for getting started from scratch guide --- docs/getting-started-guides/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting-started-guides/README.md b/docs/getting-started-guides/README.md index 8e48a30d058..7506393a1fe 100644 --- a/docs/getting-started-guides/README.md +++ b/docs/getting-started-guides/README.md @@ -164,7 +164,7 @@ Local | | | _none_ | [docs](locally.md) libvirt/KVM | CoreOS | CoreOS | libvirt/KVM | [docs](libvirt-coreos.md) | | Community (@lhuard1A) oVirt | | | | [docs](ovirt.md) | | Community (@simon3z) Rackspace | CoreOS | CoreOS | flannel | [docs](rackspace.md) | | Community (@doublerr) -any | any | any | any | [docs](scratch.md) | | Community (@doublerr) +any | any | any | any | [docs](scratch.md) | | Community (@erictune) *Note*: The above table is ordered by version test/used in notes followed by support level. From b69ef7b5caaf5f5b2c06a2189dd258a493dba958 Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Tue, 28 Jul 2015 16:59:28 +0200 Subject: [PATCH 07/49] Remove shell services test --- hack/e2e-suite/services.sh | 481 --------------------------- hack/lib/golang.sh | 1 - pkg/kubectl/resource_printer_test.go | 3 +- test/e2e/shell.go | 50 --- 4 files changed, 1 insertion(+), 534 deletions(-) delete mode 100755 hack/e2e-suite/services.sh delete mode 100644 test/e2e/shell.go diff --git a/hack/e2e-suite/services.sh b/hack/e2e-suite/services.sh deleted file mode 100755 index 35d1cf01f95..00000000000 --- a/hack/e2e-suite/services.sh +++ /dev/null @@ -1,481 +0,0 @@ -#!/bin/bash - -# Copyright 2014 The Kubernetes Authors All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Verifies that services and virtual IPs work. - - -# TODO(wojtek-t): Remove this test once the following go tests are stable: -# - "should work after restarting kube-proxy" -# - "should work after restarting apiserver" - -set -o errexit -set -o nounset -set -o pipefail - -KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../.. - -: ${KUBE_VERSION_ROOT:=${KUBE_ROOT}} -: ${KUBECTL:="${KUBE_VERSION_ROOT}/cluster/kubectl.sh"} -: ${KUBE_CONFIG_FILE:="config-test.sh"} - -export KUBECTL KUBE_CONFIG_FILE - -TEST_NAMESPACE="services-test-${RANDOM}" -KUBECTL="${KUBECTL} --namespace=${TEST_NAMESPACE}" - -source "${KUBE_ROOT}/cluster/kube-env.sh" -source "${KUBE_VERSION_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh" - -prepare-e2e - -function error() { - echo "$@" >&2 - exit 1 -} - -function sort_args() { - [ $# == 0 ] && return - a=($(printf "%s\n" "$@" | sort -n)) - echo "${a[*]}" -} - -# Join args $2... with $1 between them. 
-# Example: join ", " x y z => x, y, z -function join() { - local sep item - sep=$1 - shift - echo -n "${1:-}" - shift - for item; do - echo -n "${sep}${item}" - done - echo -} - -svcs_to_clean=() -function do_teardown() { - ${KUBECTL} delete namespace "${TEST_NAMESPACE}" -} - -function make_namespace() { - echo "Making namespace '${TEST_NAMESPACE}'" - ${KUBECTL} create -f - << __EOF__ -{ - "kind": "Namespace", - "apiVersion": "v1", - "metadata": { - "name": "${TEST_NAMESPACE}" - } -} -__EOF__ -} - -wait_for_apiserver() { - echo "Waiting for apiserver to be up" - - local i - for i in $(seq 1 12); do - results=$(ssh-to-node "${master}" " - wget -q -T 1 -O - http://localhost:8080/healthz || true - ") - if [[ "${results}" == "ok" ]]; then - return - fi - sleep 5 # wait for apiserver to restart - done - error "restarting apiserver timed out" -} - -# Args: -# $1: service name -# $2: service port -# $3: service replica count -function start_service() { - echo "Starting service '${TEST_NAMESPACE}/$1' on port $2 with $3 replicas" - svcs_to_clean+=("$1") - ${KUBECTL} create -f - << __EOF__ -{ - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "$1", - "labels": { - "name": "$1" - } - }, - "spec": { - "replicas": $3, - "selector": { - "name": "$1" - }, - "template": { - "metadata": { - "labels": { - "name": "$1" - } - }, - "spec": { - "containers": [ - { - "name": "$1", - "image": "gcr.io/google_containers/serve_hostname:1.1", - "ports": [ - { - "containerPort": 9376, - "protocol": "TCP" - } - ] - } - ] - } - } - } -} -__EOF__ - ${KUBECTL} create -f - << __EOF__ -{ - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "$1", - "labels": { - "name": "$1" - } - }, - "spec": { - "ports": [ - { - "protocol": "TCP", - "port": $2, - "targetPort": 9376 - } - ], - "selector": { - "name": "$1" - } - } -} -__EOF__ -} - -# Args: -# $1: service name -function stop_service() { - echo "Stopping service '$1'" - ${KUBECTL} stop rc "$1" || true - ${KUBECTL} delete services "$1" || true -} - -# Args: -# $1: service name -# $2: expected pod count -function query_pods() { - # This fails very occasionally, so retry a bit. - local pods_unsorted=() - local i - for i in $(seq 1 10); do - pods_unsorted=($(${KUBECTL} get pods -o template \ - '--template={{range.items}}{{.metadata.name}} {{end}}' \ - '--api-version=v1' \ - -l name="$1")) - found="${#pods_unsorted[*]}" - if [[ "${found}" == "$2" ]]; then - break - fi - sleep 3 - done - if [[ "${found}" != "$2" ]]; then - error "Failed to query pods for $1: expected $2, found ${found}" - fi - - # The "return" is a sorted list of pod IDs. - sort_args "${pods_unsorted[@]}" -} - -# Args: -# $1: service name -# $2: pod count -function wait_for_pods() { - echo "Querying pods in $1" - local pods_sorted=$(query_pods "$1" "$2") - printf '\t%s\n' ${pods_sorted} - - # Container turn up on a clean cluster can take a while for the docker image - # pulls. Wait a generous amount of time. - # TODO: Sometimes pods change underneath us, which makes the GET fail (404). - # Maybe this test can be loosened and still be useful? 
- pods_needed=$2 - local i - for i in $(seq 1 30); do - echo "Waiting for ${pods_needed} pods to become 'running'" - pods_needed="$2" - for id in ${pods_sorted}; do - status=$(${KUBECTL} get pods "${id}" -o template --template='{{.status.phase}}' --api-version=v1) - if [[ "${status}" == "Running" ]]; then - pods_needed=$((pods_needed-1)) - fi - done - if [[ "${pods_needed}" == 0 ]]; then - break - fi - sleep 3 - done - if [[ "${pods_needed}" -gt 0 ]]; then - error "Pods for $1 did not come up in time" - fi -} - -# Args: -# $1: service name -# $2: service IP -# $3: service port -# $4: pod count -# $5: pod IDs (sorted) -function wait_for_service_up() { - local i - local found_pods - echo "waiting for $1 at $2:$3" - # TODO: Reduce this interval once we have a sense for the latency distribution. - for i in $(seq 1 10); do - results=($(ssh-to-node "${test_node}" " - set -e; - for i in $(seq -s' ' 1 $(($4*3))); do - wget -q -T 1 -O - http://$2:$3 || true; - echo; - done | sort -n | uniq - ")) - - found_pods=$(sort_args "${results[@]:+${results[@]}}") - if [[ "${found_pods}" == "$5" ]]; then - return - fi - echo "expected '$5', got '${found_pods}': will try again" - sleep 5 # wait for endpoints to propagate - done - error "$1: failed to verify portal from host" -} - -# Args: -# $1: service name -# $2: service IP -# $3: service port -function wait_for_service_down() { - local i - for i in $(seq 1 15); do - $(ssh-to-node "${test_node}" " - curl -s --connect-timeout 2 "http://$2:$3" >/dev/null 2>&1 && exit 1 || exit 0; - ") && break - echo "Waiting for $1 to go down" - sleep 2 - done -} - -# Args: -# $1: service name -# $2: service IP -# $3: service port -# $4: pod count -# $5: pod IDs (sorted) -function verify_from_container() { - local i - local found_pods - echo "waiting for $1 at $2:$3" - # TODO: Reduce this interval once we have a sense for the latency distribution. - for i in $(seq 1 10); do - results=($(ssh-to-node "${test_node}" " - set -e; - sudo docker pull gcr.io/google_containers/busybox >/dev/null; - sudo docker run gcr.io/google_containers/busybox sh -c ' - for i in $(seq -s' ' 1 $(($4*3))); do - wget -q -T 1 -O - http://$2:$3 || true; - echo; - done - '" | sort -n | uniq)) - - found_pods=$(sort_args "${results[@]:+${results[@]}}") - if [[ "${found_pods}" == "$5" ]]; then - return - fi - echo "expected '$5', got '${found_pods}': will try again" - sleep 5 # wait for endpoints to propagate - done - error "$1: failed to verify portal from host" -} - -trap do_teardown EXIT - -# Get node IP addresses and pick one as our test point. -detect-minions -test_node="${MINION_NAMES[0]}" -master="${MASTER_NAME}" - -# Make our namespace -make_namespace - -# Launch some pods and services. -svc1_name="service1" -svc1_port=80 -svc1_count=3 -start_service "${svc1_name}" "${svc1_port}" "${svc1_count}" - -svc2_name="service2" -svc2_port=80 -svc2_count=3 -start_service "${svc2_name}" "${svc2_port}" "${svc2_count}" - -# Wait for the pods to become "running". -wait_for_pods "${svc1_name}" "${svc1_count}" -wait_for_pods "${svc2_name}" "${svc2_count}" - -# Get the sorted lists of pods. -svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}") -svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}") - -# Get the VIP IPs. 
-svc1_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc1_name}" --api-version=v1) -test -n "${svc1_ip}" || error "Service1 IP is blank" -svc2_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc2_name}" --api-version=v1) -test -n "${svc2_ip}" || error "Service2 IP is blank" -if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then - error "VIPs conflict: ${svc1_ip}" -fi - -# -# Test 1: Prove that the service VIP is alive. -# -echo "Test 1: Prove that the service VIP is alive." -echo "Verifying the VIP from the host" -wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ - "${svc1_count}" "${svc1_pods}" -wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ - "${svc2_count}" "${svc2_pods}" -echo "Verifying the VIP from a container" -verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ - "${svc1_count}" "${svc1_pods}" -verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ - "${svc2_count}" "${svc2_pods}" - -# -# Test 2: Bounce the proxy and make sure the VIP comes back. -# -echo "Test 2: Bounce the proxy and make sure the VIP comes back." -echo "Restarting kube-proxy" -restart-kube-proxy "${test_node}" -echo "Verifying the VIP from the host" -wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ - "${svc1_count}" "${svc1_pods}" -wait_for_service_up "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ - "${svc2_count}" "${svc2_pods}" -echo "Verifying the VIP from a container" -verify_from_container "${svc1_name}" "${svc1_ip}" "${svc1_port}" \ - "${svc1_count}" "${svc1_pods}" -verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \ - "${svc2_count}" "${svc2_pods}" - -# -# Test 3: Stop one service and make sure it is gone. -# -echo "Test 3: Stop one service and make sure it is gone." -stop_service "${svc1_name}" -wait_for_service_down "${svc1_name}" "${svc1_ip}" "${svc1_port}" - -# -# Test 4: Bring up another service. -# TODO: Actually add a test to force re-use. -# -echo "Test 4: Bring up another service." -svc3_name="service3" -svc3_port=80 -svc3_count=3 -start_service "${svc3_name}" "${svc3_port}" "${svc3_count}" - -# Wait for the pods to become "running". -wait_for_pods "${svc3_name}" "${svc3_count}" - -# Get the sorted lists of pods. -svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}") - -# Get the VIP. -svc3_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc3_name}" --api-version=v1) -test -n "${svc3_ip}" || error "Service3 IP is blank" - -echo "Verifying the VIPs from the host" -wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ - "${svc3_count}" "${svc3_pods}" -echo "Verifying the VIPs from a container" -verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ - "${svc3_count}" "${svc3_pods}" - -# -# Test 5: Remove the iptables rules, make sure they come back. -# -echo "Test 5: Remove the iptables rules, make sure they come back." 
-echo "Manually removing iptables rules" -# Remove both the new and old style chains, in case we're testing on an old kubelet -ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-HOST || true" -ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true" -echo "Verifying the VIPs from the host" -wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ - "${svc3_count}" "${svc3_pods}" -echo "Verifying the VIPs from a container" -verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ - "${svc3_count}" "${svc3_pods}" - -# -# Test 6: Restart the master, make sure VIPs come back. -# -echo "Test 6: Restart the master, make sure VIPs come back." -echo "Restarting the master" -restart-apiserver "${master}" -wait_for_apiserver -echo "Verifying the VIPs from the host" -wait_for_service_up "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ - "${svc3_count}" "${svc3_pods}" -echo "Verifying the VIPs from a container" -verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \ - "${svc3_count}" "${svc3_pods}" - -# -# Test 7: Bring up another service, make sure it does not re-use IPs. -# -echo "Test 7: Bring up another service, make sure it does not re-use IPs." -svc4_name="service4" -svc4_port=80 -svc4_count=3 -start_service "${svc4_name}" "${svc4_port}" "${svc4_count}" - -# Wait for the pods to become "running". -wait_for_pods "${svc4_name}" "${svc4_count}" - -# Get the sorted lists of pods. -svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}") - -# Get the VIP. -svc4_ip=$(${KUBECTL} get services -o template '--template={{.spec.clusterIP}}' "${svc4_name}" --api-version=v1) -test -n "${svc4_ip}" || error "Service4 IP is blank" -if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then - error "VIPs conflict: ${svc4_ip}" -fi - -echo "Verifying the VIPs from the host" -wait_for_service_up "${svc4_name}" "${svc4_ip}" "${svc4_port}" \ - "${svc4_count}" "${svc4_pods}" -echo "Verifying the VIPs from a container" -verify_from_container "${svc4_name}" "${svc4_ip}" "${svc4_port}" \ - "${svc4_count}" "${svc4_pods}" - -exit 0 diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 5ee27ee917f..cd5f836880d 100644 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -86,7 +86,6 @@ readonly KUBE_TEST_PORTABLE=( contrib/for-tests/network-tester/rc.json contrib/for-tests/network-tester/service.json hack/e2e.go - hack/e2e-suite hack/e2e-internal hack/ginkgo-e2e.sh hack/lib diff --git a/pkg/kubectl/resource_printer_test.go b/pkg/kubectl/resource_printer_test.go index a1b44020f00..700f3257956 100644 --- a/pkg/kubectl/resource_printer_test.go +++ b/pkg/kubectl/resource_printer_test.go @@ -414,8 +414,7 @@ func TestTemplateStrings(t *testing.T) { "true", }, } - // The point of this test is to verify that the below template works. If you change this - // template, you need to update hack/e2e-suite/update.sh. + // The point of this test is to verify that the below template works. tmpl := `{{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "foo") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}` p, err := NewTemplatePrinter([]byte(tmpl)) if err != nil { diff --git a/test/e2e/shell.go b/test/e2e/shell.go deleted file mode 100644 index c52a706c4b5..00000000000 --- a/test/e2e/shell.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - "bytes" - "fmt" - "os/exec" - "path/filepath" - - . "github.com/onsi/ginkgo" -) - -var _ = Describe("Shell", func() { - It("should pass tests for services.sh", func() { - // This test requires: - // - SSH - // - master access - // ... so the provider check should be identical to the intersection of - // providers that provide those capabilities. - SkipUnlessProviderIs("gce") - runCmdTest(filepath.Join(testContext.RepoRoot, "hack/e2e-suite/services.sh")) - }) -}) - -// Runs the given cmd test. -func runCmdTest(path string) { - By(fmt.Sprintf("Running %v", path)) - cmd := exec.Command(path) - cmd.Stdout = bytes.NewBuffer(nil) - cmd.Stderr = cmd.Stdout - - if err := cmd.Run(); err != nil { - Fail(fmt.Sprintf("Error running %v:\nCommand output:\n%v\n", cmd, cmd.Stdout)) - } -} From edd21d1e7ac70fef924bdf62de88286d5493f48a Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Tue, 28 Jul 2015 11:58:06 -0400 Subject: [PATCH 08/49] Cleanup #11029 to use go autocast-after-type-switch (thanks to brendandburns for the suggestion) --- pkg/cloudprovider/aws/aws.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pkg/cloudprovider/aws/aws.go b/pkg/cloudprovider/aws/aws.go index 2067442f923..b738a3530a3 100644 --- a/pkg/cloudprovider/aws/aws.go +++ b/pkg/cloudprovider/aws/aws.go @@ -1694,10 +1694,9 @@ func (s *AWSCloud) ensureSecurityGroup(name string, description string, vpcID st createResponse, err := s.ec2.CreateSecurityGroup(createRequest) if err != nil { ignore := false - switch err.(type) { + switch err := err.(type) { case awserr.Error: - awsError := err.(awserr.Error) - if awsError.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries { + if err.Code() == "InvalidGroup.Duplicate" && attempt < MaxReadThenCreateRetries { glog.V(2).Infof("Got InvalidGroup.Duplicate while creating security group (race?); will retry") ignore = true } From 85b04f9e6c89bbc928bb52fdc1b61aa3f081389f Mon Sep 17 00:00:00 2001 From: Bryan Stenson Date: Tue, 28 Jul 2015 09:24:17 -0700 Subject: [PATCH 09/49] minor doc typo --- cluster/addons/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/addons/README.md b/cluster/addons/README.md index 3361f4a6496..a433445e460 100644 --- a/cluster/addons/README.md +++ b/cluster/addons/README.md @@ -12,7 +12,7 @@ the system will bring them back to the original state, in particular: On the cluster, the add-ons are kept in ```/etc/kubernetes/addons``` on the master node, in yaml files (json is not supported at the moment). A system daemon periodically checks if -the contents of this directory is consistent with the add-one objects on the API +the contents of this directory is consistent with the add-on objects on the API server. If any difference is spotted, the system updates the API objects accordingly. (Limitation: for now, the system compares only the names of objects in the directory and on the API server. 
So changes in parameters may not be From a84fa79a01e0ad279f451687ad6de2d260649b03 Mon Sep 17 00:00:00 2001 From: Cesar Wong Date: Fri, 26 Jun 2015 17:10:28 -0400 Subject: [PATCH 10/49] Use versioned objects for GET and CONNECT operations --- api/swagger-spec/v1.json | 216 ++++++++++++++++++++++++++++++++ pkg/apiserver/api_installer.go | 29 +++-- pkg/apiserver/apiserver_test.go | 72 ++++++++++- 3 files changed, 304 insertions(+), 13 deletions(-) diff --git a/api/swagger-spec/v1.json b/api/swagger-spec/v1.json index 8552cf8386a..fd65af7ce09 100644 --- a/api/swagger-spec/v1.json +++ b/api/swagger-spec/v1.json @@ -5706,6 +5706,54 @@ "summary": "connect GET requests to exec of Pod", "nickname": "connectGetNamespacedPodExec", "parameters": [ + { + "type": "boolean", + "paramType": "query", + "name": "stdin", + "description": "redirect the standard input stream of the pod for this call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stdout", + "description": "redirect the standard output stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stderr", + "description": "redirect the standard error stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "tty", + "description": "allocate a terminal for this exec call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "query", + "name": "container", + "description": "the container in which to execute the command. Defaults to only container if there is only one container in the pod.", + "required": false, + "allowMultiple": false + }, + { + "type": "", + "paramType": "query", + "name": "command", + "description": "the command to execute; argv array; not executed within a shell", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -5736,6 +5784,54 @@ "summary": "connect POST requests to exec of Pod", "nickname": "connectPostNamespacedPodExec", "parameters": [ + { + "type": "boolean", + "paramType": "query", + "name": "stdin", + "description": "redirect the standard input stream of the pod for this call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stdout", + "description": "redirect the standard output stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "stderr", + "description": "redirect the standard error stream of the pod for this call; defaults to true", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "tty", + "description": "allocate a terminal for this exec call; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "string", + "paramType": "query", + "name": "container", + "description": "the container in which to execute the command. 
Defaults to only container if there is only one container in the pod.", + "required": false, + "allowMultiple": false + }, + { + "type": "", + "paramType": "query", + "name": "command", + "description": "the command to execute; argv array; not executed within a shell", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -5780,6 +5876,30 @@ "required": false, "allowMultiple": false }, + { + "type": "string", + "paramType": "query", + "name": "container", + "description": "the container for which to stream logs; defaults to only container if there is one container in the pod", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "follow", + "description": "follow the log stream of the pod; defaults to false", + "required": false, + "allowMultiple": false + }, + { + "type": "boolean", + "paramType": "query", + "name": "previous", + "description": "return previous terminated container logs; defaults to false", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -5889,6 +6009,14 @@ "summary": "connect GET requests to proxy of Pod", "nickname": "connectGetNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -5919,6 +6047,14 @@ "summary": "connect POST requests to proxy of Pod", "nickname": "connectPostNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -5949,6 +6085,14 @@ "summary": "connect PUT requests to proxy of Pod", "nickname": "connectPutNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -5979,6 +6123,14 @@ "summary": "connect DELETE requests to proxy of Pod", "nickname": "connectDeleteNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6009,6 +6161,14 @@ "summary": "connect HEAD requests to proxy of Pod", "nickname": "connectHeadNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6039,6 +6199,14 @@ "summary": "connect OPTIONS requests to proxy of Pod", "nickname": "connectOptionsNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6075,6 +6243,14 @@ "summary": "connect GET requests to proxy of Pod", "nickname": "connectGetNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { 
"type": "string", "paramType": "path", @@ -6113,6 +6289,14 @@ "summary": "connect POST requests to proxy of Pod", "nickname": "connectPostNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6151,6 +6335,14 @@ "summary": "connect PUT requests to proxy of Pod", "nickname": "connectPutNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6189,6 +6381,14 @@ "summary": "connect DELETE requests to proxy of Pod", "nickname": "connectDeleteNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6227,6 +6427,14 @@ "summary": "connect HEAD requests to proxy of Pod", "nickname": "connectHeadNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", @@ -6265,6 +6473,14 @@ "summary": "connect OPTIONS requests to proxy of Pod", "nickname": "connectOptionsNamespacedPodProxy", "parameters": [ + { + "type": "string", + "paramType": "query", + "name": "path", + "description": "URL path to use in proxy request to pod", + "required": false, + "allowMultiple": false + }, { "type": "string", "paramType": "path", diff --git a/pkg/apiserver/api_installer.go b/pkg/apiserver/api_installer.go index 3a53e3a671a..debd438936c 100644 --- a/pkg/apiserver/api_installer.go +++ b/pkg/apiserver/api_installer.go @@ -204,10 +204,11 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag } versionedStatus := indirectArbitraryPointer(versionedStatusPtr) var ( - getOptions runtime.Object - getOptionsKind string - getSubpath bool - getSubpathKey string + getOptions runtime.Object + versionedGetOptions runtime.Object + getOptionsKind string + getSubpath bool + getSubpathKey string ) if isGetterWithOptions { getOptions, getSubpath, getSubpathKey = getterWithOptions.NewGetOptions() @@ -215,14 +216,19 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if err != nil { return err } + versionedGetOptions, err = a.group.Creater.New(serverVersion, getOptionsKind) + if err != nil { + return err + } isGetter = true } var ( - connectOptions runtime.Object - connectOptionsKind string - connectSubpath bool - connectSubpathKey string + connectOptions runtime.Object + versionedConnectOptions runtime.Object + connectOptionsKind string + connectSubpath bool + connectSubpathKey string ) if isConnecter { connectOptions, connectSubpath, connectSubpathKey = connecter.NewConnectOptions() @@ -231,6 +237,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag if err != nil { return err } + versionedConnectOptions, err = a.group.Creater.New(serverVersion, connectOptionsKind) } } @@ -390,7 +397,7 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Returns(http.StatusOK, "OK", versionedObject). 
Writes(versionedObject) if isGetterWithOptions { - if err := addObjectParams(ws, route, getOptions); err != nil { + if err := addObjectParams(ws, route, versionedGetOptions); err != nil { return err } } @@ -561,8 +568,8 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag Produces("*/*"). Consumes("*/*"). Writes("string") - if connectOptions != nil { - if err := addObjectParams(ws, route, connectOptions); err != nil { + if versionedConnectOptions != nil { + if err := addObjectParams(ws, route, versionedConnectOptions); err != nil { return err } } diff --git a/pkg/apiserver/apiserver_test.go b/pkg/apiserver/apiserver_test.go index 2b23cb036a5..9b01efe127e 100644 --- a/pkg/apiserver/apiserver_test.go +++ b/pkg/apiserver/apiserver_test.go @@ -255,8 +255,8 @@ func (*SimpleRoot) IsAnAPIObject() {} type SimpleGetOptions struct { api.TypeMeta `json:",inline"` - Param1 string `json:"param1"` - Param2 string `json:"param2"` + Param1 string `json:"param1" description:"description for param1"` + Param2 string `json:"param2" description:"description for param2"` Path string `json:"atAPath"` } @@ -1078,6 +1078,47 @@ func TestGetBinary(t *testing.T) { } } +func validateSimpleGetOptionsParams(t *testing.T, route *restful.Route) { + // Validate name and description + expectedParams := map[string]string{ + "param1": "description for param1", + "param2": "description for param2", + "atAPath": "", + } + for _, p := range route.ParameterDocs { + data := p.Data() + if desc, exists := expectedParams[data.Name]; exists { + if desc != data.Description { + t.Errorf("unexpected description for parameter %s: %s\n", data.Name, data.Description) + } + delete(expectedParams, data.Name) + } + } + if len(expectedParams) > 0 { + t.Errorf("did not find all expected parameters: %#v", expectedParams) + } +} + +func TestGetWithOptionsRouteParams(t *testing.T) { + storage := map[string]rest.Storage{} + simpleStorage := GetWithOptionsRESTStorage{ + SimpleRESTStorage: &SimpleRESTStorage{}, + } + storage["simple"] = &simpleStorage + handler := handle(storage) + ws := handler.(*defaultAPIServer).container.RegisteredWebServices() + if len(ws) == 0 { + t.Fatal("no web services registered") + } + routes := ws[0].Routes() + for i := range routes { + if routes[i].Method == "GET" && routes[i].Operation == "readNamespacedSimple" { + validateSimpleGetOptionsParams(t, &routes[i]) + break + } + } +} + func TestGetWithOptions(t *testing.T) { storage := map[string]rest.Storage{} simpleStorage := GetWithOptionsRESTStorage{ @@ -1292,6 +1333,33 @@ func TestConnect(t *testing.T) { } } +func TestConnectWithOptionsRouteParams(t *testing.T) { + connectStorage := &ConnecterRESTStorage{ + connectHandler: &SimpleConnectHandler{}, + emptyConnectOptions: &SimpleGetOptions{}, + } + storage := map[string]rest.Storage{ + "simple": &SimpleRESTStorage{}, + "simple/connect": connectStorage, + } + handler := handle(storage) + ws := handler.(*defaultAPIServer).container.RegisteredWebServices() + if len(ws) == 0 { + t.Fatal("no web services registered") + } + routes := ws[0].Routes() + for i := range routes { + switch routes[i].Operation { + case "connectGetNamespacedSimpleConnect": + case "connectPostNamespacedSimpleConnect": + case "connectPutNamespacedSimpleConnect": + case "connectDeleteNamespacedSimpleConnect": + validateSimpleGetOptionsParams(t, &routes[i]) + + } + } +} + func TestConnectWithOptions(t *testing.T) { responseText := "Hello World" itemID := "theID" From e62c47db8c5776610baf9cf8de00e74d2f81ec2f Mon Sep 17 00:00:00 
2001 From: derekwaynecarr Date: Tue, 28 Jul 2015 14:36:56 -0400 Subject: [PATCH 11/49] Update kube-apiserver unit files for systemd --- contrib/init/systemd/environ/apiserver | 2 +- contrib/init/systemd/kube-apiserver.service | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/contrib/init/systemd/environ/apiserver b/contrib/init/systemd/environ/apiserver index d0b19005a67..3a2da7d1aff 100644 --- a/contrib/init/systemd/environ/apiserver +++ b/contrib/init/systemd/environ/apiserver @@ -5,7 +5,7 @@ # # The address on the local server to listen to. -KUBE_API_ADDRESS="--address=127.0.0.1" +KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1" # The port on the local server to listen on. # KUBE_API_PORT="--port=8080" diff --git a/contrib/init/systemd/kube-apiserver.service b/contrib/init/systemd/kube-apiserver.service index b9a7609b62f..781bc702b19 100644 --- a/contrib/init/systemd/kube-apiserver.service +++ b/contrib/init/systemd/kube-apiserver.service @@ -1,6 +1,7 @@ [Unit] Description=Kubernetes API Server Documentation=https://github.com/GoogleCloudPlatform/kubernetes +After=network.target [Service] EnvironmentFile=-/etc/kubernetes/config From d08ffb310124e805e8a9dc6e4bc45f1c35b87071 Mon Sep 17 00:00:00 2001 From: Miguel Perez Date: Tue, 28 Jul 2015 16:56:56 -0400 Subject: [PATCH 12/49] Fix file extension --- docs/user-guide/service-accounts.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/user-guide/service-accounts.md b/docs/user-guide/service-accounts.md index feac0186af3..76745b7cfec 100644 --- a/docs/user-guide/service-accounts.md +++ b/docs/user-guide/service-accounts.md @@ -81,7 +81,7 @@ kind: ServiceAccount metadata: name: build-robot EOF -$ kubectl create -f /tmp/serviceaccount.json +$ kubectl create -f /tmp/serviceaccount.yaml serviceaccounts/build-robot ``` From dfe3e8076b723d9ed0754ad94727cb798f669601 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Tue, 28 Jul 2015 21:58:26 -0700 Subject: [PATCH 13/49] Add go-flowrate dependency. 
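For reference, a minimal usage sketch (illustrative only, not part of this
change): NewReader, Done, and Status below come from the vendored sources
added in this commit, while the 4096-byte buffer and 1024 B/s limit are
arbitrary values chosen for the example.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/mxk/go-flowrate/flowrate"
)

func main() {
	data := make([]byte, 4096)

	// Wrap a reader so that reads are capped at 1024 bytes per second.
	r := flowrate.NewReader(bytes.NewReader(data), 1024)
	out, err := ioutil.ReadAll(r)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}

	// Mark the transfer finished; Done returns the total bytes moved and
	// freezes the monitor's Status.
	total := r.Done()
	fmt.Printf("read %d bytes (monitor saw %d), avg rate %d B/s\n",
		len(out), total, r.Status().AvgRate)
}
```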
--- Godeps/Godeps.json | 4 + .../mxk/go-flowrate/flowrate/flowrate.go | 267 ++++++++++++++++++ .../github.com/mxk/go-flowrate/flowrate/io.go | 133 +++++++++ .../mxk/go-flowrate/flowrate/io_test.go | 146 ++++++++++ .../mxk/go-flowrate/flowrate/util.go | 67 +++++ 5 files changed, 617 insertions(+) create mode 100644 Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/flowrate.go create mode 100644 Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io.go create mode 100644 Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io_test.go create mode 100644 Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/util.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 7da648bcdfd..cef354a20c1 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -425,6 +425,10 @@ "ImportPath": "github.com/mitchellh/mapstructure", "Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf" }, + { + "ImportPath": "github.com/mxk/go-flowrate/flowrate", + "Rev": "cca7078d478f8520f85629ad7c68962d31ed7682" + }, { "ImportPath": "github.com/onsi/ginkgo", "Comment": "v1.2.0-6-gd981d36", diff --git a/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/flowrate.go b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/flowrate.go new file mode 100644 index 00000000000..1b727721e14 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/flowrate.go @@ -0,0 +1,267 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. +func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. 
It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. 
+func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. +func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io.go b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io.go new file mode 100644 index 00000000000..fbe0909725a --- /dev/null +++ b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. +var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. 
+type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. +func (w *Writer) SetBlocking(new bool) (old bool) { + old, w.block = w.block, new + return +} + +// Close closes the underlying writer if it implements the io.Closer interface. 
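+// Close also calls Done on the embedded Monitor, so the transfer statistics
+// become final once the writer is closed.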
+func (w *Writer) Close() error { + defer w.Done() + if c, ok := w.Writer.(io.Closer); ok { + return c.Close() + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io_test.go b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io_test.go new file mode 100644 index 00000000000..fa7f4b4ae07 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/io_test.go @@ -0,0 +1,146 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "bytes" + "reflect" + "testing" + "time" +) + +const ( + _50ms = 50 * time.Millisecond + _100ms = 100 * time.Millisecond + _200ms = 200 * time.Millisecond + _300ms = 300 * time.Millisecond + _400ms = 400 * time.Millisecond + _500ms = 500 * time.Millisecond +) + +func nextStatus(m *Monitor) Status { + samples := m.samples + for i := 0; i < 30; i++ { + if s := m.Status(); s.Samples != samples { + return s + } + time.Sleep(5 * time.Millisecond) + } + return m.Status() +} + +func TestReader(t *testing.T) { + in := make([]byte, 100) + for i := range in { + in[i] = byte(i) + } + b := make([]byte, 100) + r := NewReader(bytes.NewReader(in), 100) + start := time.Now() + + // Make sure r implements Limiter + _ = Limiter(r) + + // 1st read of 10 bytes is performed immediately + if n, err := r.Read(b); n != 10 || err != nil { + t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + // No new Reads allowed in the current sample + r.SetBlocking(false) + if n, err := r.Read(b); n != 0 || err != nil { + t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("r.Read(b) took too long (%v)", rt) + } + + status := [6]Status{0: r.Status()} // No samples in the first status + + // 2nd read of 10 bytes blocks until the next sample + r.SetBlocking(true) + if n, err := r.Read(b[10:]); n != 10 || err != nil { + t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _100ms { + t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) + } + + status[1] = r.Status() // 1st sample + status[2] = nextStatus(r.Monitor) // 2nd sample + status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample + + if n := r.Done(); n != 20 { + t.Fatalf("r.Done() expected 20; got %v", n) + } + + status[4] = r.Status() + status[5] = nextStatus(r.Monitor) // Timeout + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + Status{true, start, _100ms, 0, 10, 1, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _200ms, _100ms, 20, 2, 100, 100, 100, 100, 0, 0, 0}, + Status{true, start, _300ms, _200ms, 20, 3, 0, 90, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + Status{false, start, _300ms, 0, 20, 3, 0, 0, 67, 100, 0, 0, 0}, + } + for i, s := range status { + if !reflect.DeepEqual(&s, &want[i]) { + t.Errorf("r.Status(%v) expected %v; got %v", i, want[i], s) + } + } + if !bytes.Equal(b[:20], in[:20]) { + t.Errorf("r.Read() input doesn't match output") + } +} + +func TestWriter(t *testing.T) { + b := make([]byte, 100) + for i := range b { + b[i] = byte(i) + } + w := NewWriter(&bytes.Buffer{}, 200) + start := time.Now() + + // Make sure w implements Limiter + _ = Limiter(w) + + // Non-blocking 
20-byte write for the first sample returns ErrLimit + w.SetBlocking(false) + if n, err := w.Write(b); n != 20 || err != ErrLimit { + t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) + } else if rt := time.Since(start); rt > _50ms { + t.Fatalf("w.Write(b) took too long (%v)", rt) + } + + // Blocking 80-byte write + w.SetBlocking(true) + if n, err := w.Write(b[20:]); n != 80 || err != nil { + t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) + } else if rt := time.Since(start); rt < _400ms { + t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) + } + + w.SetTransferSize(100) + status := []Status{w.Status(), nextStatus(w.Monitor)} + start = status[0].Start + + // Active, Start, Duration, Idle, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, TimeRem, Progress + want := []Status{ + Status{true, start, _400ms, 0, 80, 4, 200, 200, 200, 200, 20, _100ms, 80000}, + Status{true, start, _500ms, _100ms, 100, 5, 200, 200, 200, 200, 0, 0, 100000}, + } + for i, s := range status { + if !reflect.DeepEqual(&s, &want[i]) { + t.Errorf("w.Status(%v) expected %v; got %v", i, want[i], s) + } + } + if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { + t.Errorf("w.Write() input doesn't match output") + } +} diff --git a/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/util.go b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/util.go new file mode 100644 index 00000000000..4caac583fc0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/mxk/go-flowrate/flowrate/util.go @@ -0,0 +1,67 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "math" + "strconv" + "time" +) + +// clockRate is the resolution and precision of clock(). +const clockRate = 20 * time.Millisecond + +// czero is the process start time rounded down to the nearest clockRate +// increment. +var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate + +// clock returns a low resolution timestamp relative to the process start time. +func clock() time.Duration { + return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero +} + +// clockToTime converts a clock() timestamp to an absolute time.Time value. +func clockToTime(c time.Duration) time.Time { + return time.Unix(0, int64(czero+c)) +} + +// clockRound returns d rounded to the nearest clockRate increment. +func clockRound(d time.Duration) time.Duration { + return (d + clockRate>>1) / clockRate * clockRate +} + +// round returns x rounded to the nearest int64 (non-negative values only). +func round(x float64) int64 { + if _, frac := math.Modf(x); frac >= 0.5 { + return int64(math.Ceil(x)) + } + return int64(math.Floor(x)) +} + +// Percent represents a percentage in increments of 1/1000th of a percent. +type Percent uint32 + +// percentOf calculates what percent of the total is x. +func percentOf(x, total float64) Percent { + if x < 0 || total <= 0 { + return 0 + } else if p := round(x / total * 1e5); p <= math.MaxUint32 { + return Percent(p) + } + return Percent(math.MaxUint32) +} + +func (p Percent) Float() float64 { + return float64(p) * 1e-3 +} + +func (p Percent) String() string { + var buf [12]byte + b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10) + n := len(b) + b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10) + b[n] = '.' 
+	return string(append(b, '%'))
+}
From 35cac3c4e7d095d3da52d4e49da161350bcc76a9 Mon Sep 17 00:00:00 2001
From: hurf
Date: Wed, 29 Jul 2015 17:19:18 +0800
Subject: [PATCH 14/49] Deprecate kubectl stop command

Added deprecation warning for stop.
---
 docs/man/man1/kubectl-stop.1            | 8 ++++++--
 docs/user-guide/kubectl/kubectl.md      | 4 ++--
 docs/user-guide/kubectl/kubectl_stop.md | 9 ++++++---
 pkg/kubectl/cmd/stop.go                 | 7 +++++--
 4 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/docs/man/man1/kubectl-stop.1 b/docs/man/man1/kubectl-stop.1
index dfdf732b334..51fc22d3a26 100644
--- a/docs/man/man1/kubectl-stop.1
+++ b/docs/man/man1/kubectl-stop.1
@@ -3,7 +3,7 @@

 .SH NAME
 .PP
-kubectl stop \- Gracefully shut down a resource by name or filename.
+kubectl stop \- Deprecated: Gracefully shut down a resource by name or filename.


 .SH SYNOPSIS
@@ -13,7 +13,11 @@ kubectl stop \- Gracefully shut down a resource by name or filename.

 .SH DESCRIPTION
 .PP
-Gracefully shut down a resource by name or filename.
+Deprecated: Gracefully shut down a resource by name or filename.
+
+.PP
+The stop command is deprecated; all of its functionality is covered by the delete command.
+See 'kubectl delete \-\-help' for more details.

 .PP
 Attempts to shut down and delete a resource that supports graceful termination.
diff --git a/docs/user-guide/kubectl/kubectl.md b/docs/user-guide/kubectl/kubectl.md
index 6ee5877fdb0..5cf0e4d5c4b 100644
--- a/docs/user-guide/kubectl/kubectl.md
+++ b/docs/user-guide/kubectl/kubectl.md
@@ -97,10 +97,10 @@ kubectl
 * [kubectl rolling-update](kubectl_rolling-update.md) - Perform a rolling update of the given ReplicationController.
 * [kubectl run](kubectl_run.md) - Run a particular image on the cluster.
 * [kubectl scale](kubectl_scale.md) - Set a new size for a Replication Controller.
-* [kubectl stop](kubectl_stop.md) - Gracefully shut down a resource by name or filename.
+* [kubectl stop](kubectl_stop.md) - Deprecated: Gracefully shut down a resource by name or filename.
 * [kubectl version](kubectl_version.md) - Print the client and server version information.

-###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.96000791 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-07-29 09:18:59.541696918 +0000 UTC
diff --git a/docs/user-guide/kubectl/kubectl_stop.md b/docs/user-guide/kubectl/kubectl_stop.md
index ed65ad3fe4d..804ff034899 100644
--- a/docs/user-guide/kubectl/kubectl_stop.md
+++ b/docs/user-guide/kubectl/kubectl_stop.md
@@ -33,12 +33,15 @@ Documentation for other releases can be found at

 ## kubectl stop

-Gracefully shut down a resource by name or filename.
+Deprecated: Gracefully shut down a resource by name or filename.

 ### Synopsis


-Gracefully shut down a resource by name or filename.
+Deprecated: Gracefully shut down a resource by name or filename.
+
+The stop command is deprecated; all of its functionality is covered by the delete command.
+See 'kubectl delete --help' for more details.

 Attempts to shut down and delete a resource that supports graceful termination.
 If the resource is scalable it will be scaled to 0 before deletion.
@@ -108,7 +111,7 @@ $ kubectl stop -f path/to/resources

 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager

-###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.957441942 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-07-29 09:18:59.539597953 +0000 UTC
diff --git a/pkg/kubectl/cmd/stop.go b/pkg/kubectl/cmd/stop.go
index b8b214419bb..2304876eb1a 100644
--- a/pkg/kubectl/cmd/stop.go
+++ b/pkg/kubectl/cmd/stop.go
@@ -27,7 +27,10 @@ import (
 )

 const (
-	stop_long = `Gracefully shut down a resource by name or filename.
+	stop_long = `Deprecated: Gracefully shut down a resource by name or filename.
+
+The stop command is deprecated; all of its functionality is covered by the delete command.
+See 'kubectl delete --help' for more details.

 Attempts to shut down and delete a resource that supports graceful termination.
 If the resource is scalable it will be scaled to 0 before deletion.`
@@ -50,7 +53,7 @@ func NewCmdStop(f *cmdutil.Factory, out io.Writer) *cobra.Command {
 	}{}
 	cmd := &cobra.Command{
 		Use:     "stop (-f FILENAME | RESOURCE (NAME | -l label | --all))",
-		Short:   "Gracefully shut down a resource by name or filename.",
+		Short:   "Deprecated: Gracefully shut down a resource by name or filename.",
 		Long:    stop_long,
 		Example: stop_example,
 		Run: func(cmd *cobra.Command, args []string) {
From 01ec50deb44508585b6121be04472c9ac1cbe1cd Mon Sep 17 00:00:00 2001
From: derekwaynecarr
Date: Mon, 27 Jul 2015 15:22:45 -0400
Subject: [PATCH 15/49] Vagrant passes conformance tests at HEAD

---
 docs/getting-started-guides/README.md | 4 +++-
 test/e2e/service.go                   | 2 ++
 2 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/docs/getting-started-guides/README.md b/docs/getting-started-guides/README.md
index 8e48a30d058..dcebe5b4cc3 100644
--- a/docs/getting-started-guides/README.md
+++ b/docs/getting-started-guides/README.md
@@ -134,7 +134,7 @@ Here are all the solutions mentioned above in table form.

IaaS Provider | Config.
Mgmt | OS | Networking | Docs | Conforms | Support Level
-------------------- | ------------ | ------ | ---------- | --------------------------------------------- | ---------| ----------------------------
GKE | | | GCE | [docs](https://cloud.google.com/container-engine) | | Commercial
-Vagrant | Saltstack | Fedora | OVS | [docs](vagrant.md) | | Project
+Vagrant | Saltstack | Fedora | OVS | [docs](vagrant.md) | [✓][2] | Project
GCE | Saltstack | Debian | GCE | [docs](gce.md) | [✓][1] | Project
Azure | CoreOS | CoreOS | Weave | [docs](coreos/azure/README.md) | | Community ([@errordeveloper](https://github.com/errordeveloper), [@squillace](https://github.com/squillace), [@chanezon](https://github.com/chanezon), [@crossorigin](https://github.com/crossorigin))
Docker Single Node | custom | N/A | local | [docs](docker.md) | | Project (@brendandburns)
@@ -189,6 +189,8 @@ Definition of columns:

 [1]: https://gist.github.com/erictune/4cabc010906afbcc5061
+
+[2]: https://gist.github.com/derekwaynecarr/505e56036cdf010bf6b6
diff --git a/test/e2e/service.go b/test/e2e/service.go
index 45658356223..4b470b497e2 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -231,6 +231,8 @@ var _ = Describe("Services", func() {
 	})

 	It("should be able to up and down services", func() {
+		// This test uses NodeSSHHosts, which does not work if a Node only reports LegacyHostIP
+		SkipUnlessProviderIs("gce", "gke", "aws")
 		ns := namespaces[0]
 		numPods, servicePort := 3, 80
From 3b11705a994cf7e48248d11da0c0a579d7faa243 Mon Sep 17 00:00:00 2001
From: Chao Xu
Date: Mon, 27 Jul 2015 20:47:16 -0700
Subject: [PATCH 16/49] downgrade errors returned by watchHandler in reflector.go to warnings

---
 pkg/client/cache/reflector.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/client/cache/reflector.go b/pkg/client/cache/reflector.go
index 93612354e83..89b1c85f4eb 100644
--- a/pkg/client/cache/reflector.go
+++ b/pkg/client/cache/reflector.go
@@ -224,7 +224,7 @@ func (r *Reflector) listAndWatch(stopCh <-chan struct{}) {
 		}
 		if err := r.watchHandler(w, &resourceVersion, resyncCh, stopCh); err != nil {
 			if err != errorResyncRequested && err != errorStopRequested {
-				util.HandleError(fmt.Errorf("%s: watch of %v ended with: %v", r.name, r.expectedType, err))
+				glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
 			}
 			return
 		}
From 124bb22f9209ef62b7c10d078f15c6b9b774fa0c Mon Sep 17 00:00:00 2001
From: markturansky
Date: Fri, 26 Jun 2015 16:37:11 -0400
Subject: [PATCH 17/49] Honor ReadOnly flag from persistent-volume plugin

---
 pkg/volume/aws_ebs/aws_ebs.go                   |  7 ++++++-
 pkg/volume/gce_pd/gce_pd.go                     |  8 +++++++-
 pkg/volume/glusterfs/glusterfs.go               | 15 +++++++++------
 pkg/volume/iscsi/iscsi.go                       |  9 +++++++--
 pkg/volume/persistent_claim/persistent_claim.go |  2 +-
 pkg/volume/plugins.go                           |  4 +++-
 pkg/volume/plugins_test.go                      |  2 +-
 pkg/volume/rbd/rbd.go                           | 15 +++++++++------
 8 files changed, 43 insertions(+), 19 deletions(-)

diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/aws_ebs/aws_ebs.go
index 89ab85dc1c7..84cc8d1bb63 100644
--- a/pkg/volume/aws_ebs/aws_ebs.go
+++ b/pkg/volume/aws_ebs/aws_ebs.go
@@ -45,6 +45,7 @@ type awsElasticBlockStorePlugin struct {
 }

 var _ volume.VolumePlugin = &awsElasticBlockStorePlugin{}
+var _ volume.PersistentVolumePlugin = &awsElasticBlockStorePlugin{}

 const (
 	awsElasticBlockStorePluginName = "kubernetes.io/aws-ebs"
@@ -74,11 +75,16 @@ func (plugin *awsElasticBlockStorePlugin) NewBuilder(spec *volume.Spec, pod *api
 }

 func (plugin *awsElasticBlockStorePlugin)
newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
+	// EBSs used directly in a pod have a ReadOnly flag set by the pod author.
+	// EBSs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
+	var readOnly bool
 	var ebs *api.AWSElasticBlockStoreVolumeSource
 	if spec.VolumeSource.AWSElasticBlockStore != nil {
 		ebs = spec.VolumeSource.AWSElasticBlockStore
+		readOnly = ebs.ReadOnly
 	} else {
 		ebs = spec.PersistentVolumeSource.AWSElasticBlockStore
+		readOnly = spec.ReadOnly
 	}

 	volumeID := ebs.VolumeID
@@ -87,7 +93,6 @@ func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec,
 	if ebs.Partition != 0 {
 		partition = strconv.Itoa(ebs.Partition)
 	}
-	readOnly := ebs.ReadOnly

 	return &awsElasticBlockStore{
 		podUID: podUID,
diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go
index e22442fd9f1..7489065d8e5 100644
--- a/pkg/volume/gce_pd/gce_pd.go
+++ b/pkg/volume/gce_pd/gce_pd.go
@@ -40,6 +40,7 @@ type gcePersistentDiskPlugin struct {
 }

 var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}
+var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}

 const (
 	gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
@@ -70,11 +71,17 @@ func (plugin *gcePersistentDiskPlugin) NewBuilder(spec *volume.Spec, pod *api.Po
 }

 func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
+	// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
+	// GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
+	var readOnly bool
+
 	var gce *api.GCEPersistentDiskVolumeSource
 	if spec.VolumeSource.GCEPersistentDisk != nil {
 		gce = spec.VolumeSource.GCEPersistentDisk
+		readOnly = gce.ReadOnly
 	} else {
 		gce = spec.PersistentVolumeSource.GCEPersistentDisk
+		readOnly = spec.ReadOnly
 	}

 	pdName := gce.PDName
@@ -83,7 +90,6 @@ func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, pod
 	if gce.Partition != 0 {
 		partition = strconv.Itoa(gce.Partition)
 	}
-	readOnly := gce.ReadOnly

 	return &gcePersistentDiskBuilder{
 		gcePersistentDisk: &gcePersistentDisk{
diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go
index d9f85967abf..8df45a7cbb7 100644
--- a/pkg/volume/glusterfs/glusterfs.go
+++ b/pkg/volume/glusterfs/glusterfs.go
@@ -39,6 +39,7 @@ type glusterfsPlugin struct {
 }

 var _ volume.VolumePlugin = &glusterfsPlugin{}
+var _ volume.PersistentVolumePlugin = &glusterfsPlugin{}

 const (
 	glusterfsPluginName = "kubernetes.io/glusterfs"
@@ -65,7 +66,7 @@ func (plugin *glusterfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode
 }

 func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
-	source := plugin.getGlusterVolumeSource(spec)
+	source, _ := plugin.getGlusterVolumeSource(spec)
 	ep_name := source.EndpointsName
 	ns := pod.Namespace
 	ep, err := plugin.host.GetKubeClient().Endpoints(ns).Get(ep_name)
@@ -77,16 +78,18 @@ func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ vol
 	return plugin.newBuilderInternal(spec, ep, pod, mounter, exec.New())
 }

-func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) *api.GlusterfsVolumeSource {
+func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec)
(*api.GlusterfsVolumeSource, bool) {
+	// Glusterfs volumes used directly in a pod have a ReadOnly flag set by the pod author.
+	// Glusterfs volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
 	if spec.VolumeSource.Glusterfs != nil {
-		return spec.VolumeSource.Glusterfs
+		return spec.VolumeSource.Glusterfs, spec.VolumeSource.Glusterfs.ReadOnly
 	} else {
-		return spec.PersistentVolumeSource.Glusterfs
+		return spec.PersistentVolumeSource.Glusterfs, spec.ReadOnly
 	}
 }

 func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.Endpoints, pod *api.Pod, mounter mount.Interface, exe exec.Interface) (volume.Builder, error) {
-	source := plugin.getGlusterVolumeSource(spec)
+	source, readOnly := plugin.getGlusterVolumeSource(spec)
 	return &glusterfsBuilder{
 		glusterfs: &glusterfs{
 			volName: spec.Name,
@@ -96,7 +99,7 @@ func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.End
 		},
 		hosts: ep,
 		path: source.Path,
-		readonly: source.ReadOnly,
+		readonly: readOnly,
 		exe: exe}, nil
 }
diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go
index 99064aa9df9..587b9e3865d 100644
--- a/pkg/volume/iscsi/iscsi.go
+++ b/pkg/volume/iscsi/iscsi.go
@@ -39,6 +39,7 @@ type iscsiPlugin struct {
 }

 var _ volume.VolumePlugin = &iscsiPlugin{}
+var _ volume.PersistentVolumePlugin = &iscsiPlugin{}

 const (
 	iscsiPluginName = "kubernetes.io/iscsi"
@@ -80,11 +81,16 @@ func (plugin *iscsiPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.
 }

 func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
+	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
+	// iscsi volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
+	var readOnly bool
 	var iscsi *api.ISCSIVolumeSource
 	if spec.VolumeSource.ISCSI != nil {
 		iscsi = spec.VolumeSource.ISCSI
+		readOnly = iscsi.ReadOnly
 	} else {
 		iscsi = spec.PersistentVolumeSource.ISCSI
+		readOnly = spec.ReadOnly
 	}

 	lun := strconv.Itoa(iscsi.Lun)
@@ -99,9 +105,8 @@ func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UI
 			manager: manager,
 			mounter: mounter,
 			plugin: plugin},
-		fsType: iscsi.FSType,
-		readOnly: iscsi.ReadOnly,
+		readOnly: readOnly,
 	}, nil
 }
diff --git a/pkg/volume/persistent_claim/persistent_claim.go b/pkg/volume/persistent_claim/persistent_claim.go
index 486ad0d82bf..737f6b8443b 100644
--- a/pkg/volume/persistent_claim/persistent_claim.go
+++ b/pkg/volume/persistent_claim/persistent_claim.go
@@ -78,7 +78,7 @@ func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod,
 		return nil, err
 	}

-	builder, err := plugin.host.NewWrapperBuilder(volume.NewSpecFromPersistentVolume(pv), pod, opts, mounter)
+	builder, err := plugin.host.NewWrapperBuilder(volume.NewSpecFromPersistentVolume(pv, spec.ReadOnly), pod, opts, mounter)
 	if err != nil {
 		glog.Errorf("Error creating builder for claim: %+v\n", claim.Name)
 		return nil, err
diff --git a/pkg/volume/plugins.go b/pkg/volume/plugins.go
index d94807fd891..28f80bd3e8f 100644
--- a/pkg/volume/plugins.go
+++ b/pkg/volume/plugins.go
@@ -134,6 +134,7 @@ type Spec struct {
 	Name string
 	VolumeSource api.VolumeSource
 	PersistentVolumeSource api.PersistentVolumeSource
+	ReadOnly bool
 }

 // NewSpecFromVolume creates a Spec from an api.Volume
@@ -145,10 +146,11 @@ func NewSpecFromVolume(vs *api.Volume) *Spec {
 }

 // NewSpecFromPersistentVolume creates a Spec from an api.PersistentVolume
-func NewSpecFromPersistentVolume(pv *api.PersistentVolume) *Spec {
+func NewSpecFromPersistentVolume(pv *api.PersistentVolume, readOnly bool) *Spec {
 	return &Spec{
 		Name: pv.Name,
 		PersistentVolumeSource: pv.Spec.PersistentVolumeSource,
+		ReadOnly: readOnly,
 	}
 }
diff --git a/pkg/volume/plugins_test.go b/pkg/volume/plugins_test.go
index 30ffd0755f2..a5930ac7ebb 100644
--- a/pkg/volume/plugins_test.go
+++ b/pkg/volume/plugins_test.go
@@ -43,7 +43,7 @@ func TestSpecSourceConverters(t *testing.T) {
 		},
 	}

-	converted = NewSpecFromPersistentVolume(pv)
+	converted = NewSpecFromPersistentVolume(pv, false)
 	if converted.PersistentVolumeSource.AWSElasticBlockStore == nil {
 		t.Errorf("Unexpected nil AWSElasticBlockStore: %+v", converted)
 	}
diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go
index 1bcb12fc708..e5eac04c82f 100644
--- a/pkg/volume/rbd/rbd.go
+++ b/pkg/volume/rbd/rbd.go
@@ -39,6 +39,7 @@ type rbdPlugin struct {
 }

 var _ volume.VolumePlugin = &rbdPlugin{}
+var _ volume.PersistentVolumePlugin = &rbdPlugin{}

 const (
 	rbdPluginName = "kubernetes.io/rbd"
@@ -74,7 +75,7 @@ func (plugin *rbdPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {

 func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
 	secret := ""
-	source := plugin.getRBDVolumeSource(spec)
+	source, _ := plugin.getRBDVolumeSource(spec)

 	if source.SecretRef != nil {
 		kubeClient := plugin.host.GetKubeClient()
@@ -97,16 +98,18 @@ func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Vo
 	return plugin.newBuilderInternal(spec, pod.UID, &RBDUtil{}, mounter, secret)
 }

-func (plugin *rbdPlugin) getRBDVolumeSource(spec
*volume.Spec) *api.RBDVolumeSource {
+func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*api.RBDVolumeSource, bool) {
+	// rbd volumes used directly in a pod have a ReadOnly flag set by the pod author.
+	// rbd volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
 	if spec.VolumeSource.RBD != nil {
-		return spec.VolumeSource.RBD
+		return spec.VolumeSource.RBD, spec.VolumeSource.RBD.ReadOnly
 	} else {
-		return spec.PersistentVolumeSource.RBD
+		return spec.PersistentVolumeSource.RBD, spec.ReadOnly
 	}
 }

 func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
-	source := plugin.getRBDVolumeSource(spec)
+	source, readOnly := plugin.getRBDVolumeSource(spec)
 	pool := source.RBDPool
 	if pool == "" {
 		pool = "rbd"
@@ -126,7 +129,7 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
 			volName: spec.Name,
 			Image: source.RBDImage,
 			Pool: pool,
-			ReadOnly: source.ReadOnly,
+			ReadOnly: readOnly,
 			manager: manager,
 			mounter: mounter,
 			plugin: plugin,
From fae67594903f95e072b9d6e57f4d8fa010766865 Mon Sep 17 00:00:00 2001
From: markturansky
Date: Mon, 29 Jun 2015 12:54:43 -0400
Subject: [PATCH 18/49] IsReadOnly bool on builder

---
 pkg/volume/aws_ebs/aws_ebs.go              |  4 ++
 pkg/volume/aws_ebs/aws_ebs_test.go         | 49 +++++++++++++++++++
 pkg/volume/empty_dir/empty_dir.go          |  4 ++
 pkg/volume/gce_pd/gce_pd.go                |  4 ++
 pkg/volume/git_repo/git_repo.go            |  4 ++
 pkg/volume/glusterfs/glusterfs.go          | 11 +++--
 pkg/volume/host_path/host_path.go          |  8 +++
 pkg/volume/iscsi/iscsi.go                  |  4 ++
 pkg/volume/nfs/nfs.go                      |  9 ++++
 .../persistent_claim/persistent_claim.go   | 10 +++-
 pkg/volume/rbd/rbd.go                      |  4 ++
 pkg/volume/secret/secret.go                |  4 ++
 pkg/volume/testing.go                      |  4 ++
 pkg/volume/volume.go                       |  3 ++
 .../persistent_volume_recycler.go          |  2 +-
 15 files changed, 118 insertions(+), 6 deletions(-)

diff --git a/pkg/volume/aws_ebs/aws_ebs.go b/pkg/volume/aws_ebs/aws_ebs.go
index 84cc8d1bb63..b8b60a7c33c 100644
--- a/pkg/volume/aws_ebs/aws_ebs.go
+++ b/pkg/volume/aws_ebs/aws_ebs.go
@@ -240,6 +240,10 @@ func (ebs *awsElasticBlockStore) SetUpAt(dir string) error {
 	return nil
 }

+func (pd *awsElasticBlockStore) IsReadOnly() bool {
+	return pd.readOnly
+}
+
 func makeGlobalPDPath(host volume.VolumeHost, volumeID string) string {
 	// Clean up the URI to be more fs-friendly
 	name := volumeID
diff --git a/pkg/volume/aws_ebs/aws_ebs_test.go b/pkg/volume/aws_ebs/aws_ebs_test.go
index a5017a0a896..2e7727c6919 100644
--- a/pkg/volume/aws_ebs/aws_ebs_test.go
+++ b/pkg/volume/aws_ebs/aws_ebs_test.go
@@ -21,6 +21,8 @@ import (
 	"testing"

 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
@@ -157,3 +159,50 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("SetUp() failed: %v", err)
 	}
 }
+
+func TestPersistentClaimReadOnlyFlag(t *testing.T) {
+	pv := &api.PersistentVolume{
+		ObjectMeta: api.ObjectMeta{
+			Name: "pvA",
+		},
+		Spec: api.PersistentVolumeSpec{
+			PersistentVolumeSource: api.PersistentVolumeSource{
+				AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{},
+			},
+			ClaimRef: &api.ObjectReference{
+				Name: "claimA",
+			},
+		},
+	}
+
+	claim := &api.PersistentVolumeClaim{
+ ObjectMeta: api.ObjectMeta{ + Name: "claimA", + Namespace: "nsA", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "pvA", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + + o := testclient.NewObjects(api.Scheme, api.Scheme) + o.Add(pv) + o.Add(claim) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) + plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName) + spec := volume.NewSpecFromPersistentVolume(pv, false) + + pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) + + if builder.IsReadOnly() { + t.Errorf("Expected false for builder.IsReadOnly") + } + +} diff --git a/pkg/volume/empty_dir/empty_dir.go b/pkg/volume/empty_dir/empty_dir.go index 8bcf325e13e..6a189a62841 100644 --- a/pkg/volume/empty_dir/empty_dir.go +++ b/pkg/volume/empty_dir/empty_dir.go @@ -143,6 +143,10 @@ func (ed *emptyDir) SetUpAt(dir string) error { } } +func (ed *emptyDir) IsReadOnly() bool { + return false +} + func (ed *emptyDir) setupDefault(dir string) error { return os.MkdirAll(dir, 0750) } diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go index 7489065d8e5..8969893592a 100644 --- a/pkg/volume/gce_pd/gce_pd.go +++ b/pkg/volume/gce_pd/gce_pd.go @@ -229,6 +229,10 @@ func (b *gcePersistentDiskBuilder) SetUpAt(dir string) error { return nil } +func (pd *gcePersistentDisk) IsReadOnly() bool { + return pd.readOnly +} + func makeGlobalPDName(host volume.VolumeHost, devName string) string { return path.Join(host.GetPluginDir(gcePersistentDiskPluginName), "mounts", devName) } diff --git a/pkg/volume/git_repo/git_repo.go b/pkg/volume/git_repo/git_repo.go index 92627a9c7f1..135110b8aa8 100644 --- a/pkg/volume/git_repo/git_repo.go +++ b/pkg/volume/git_repo/git_repo.go @@ -118,6 +118,10 @@ func (b *gitRepoVolumeBuilder) SetUp() error { return b.SetUpAt(b.GetPath()) } +func (gr *gitRepo) IsReadOnly() bool { + return false +} + // This is the spec for the volume that this plugin wraps. 
var wrappedVolumeSpec = &volume.Spec{ Name: "not-used", diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 8df45a7cbb7..3d9626c66d9 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -99,7 +99,7 @@ func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.End }, hosts: ep, path: source.Path, - readonly: readOnly, + readOnly: readOnly, exe: exe}, nil } @@ -128,7 +128,8 @@ type glusterfsBuilder struct { *glusterfs hosts *api.Endpoints path string - readonly bool + readOnly bool + mounter mount.Interface exe exec.Interface } @@ -161,6 +162,10 @@ func (b *glusterfsBuilder) SetUpAt(dir string) error { return err } +func (glusterfsVolume *glusterfs) IsReadOnly() bool { + return glusterfsVolume.readOnly +} + func (glusterfsVolume *glusterfs) GetPath() string { name := glusterfsPluginName return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.pod.UID, util.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName) @@ -212,7 +217,7 @@ func (b *glusterfsBuilder) setUpAtInternal(dir string) error { var errs error options := []string{} - if b.readonly { + if glusterfsVolume.readOnly { options = append(options, "ro") } diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go index d369e3c0b79..3768740509c 100644 --- a/pkg/volume/host_path/host_path.go +++ b/pkg/volume/host_path/host_path.go @@ -119,6 +119,14 @@ func (b *hostPathBuilder) SetUpAt(dir string) error { return fmt.Errorf("SetUpAt() does not make sense for host paths") } +func (b *hostPathBuilder) IsReadOnly() bool { + return false +} + +func (b *hostPathBuilder) GetPath() string { + return b.path +} + type hostPathCleaner struct { *hostPath } diff --git a/pkg/volume/iscsi/iscsi.go b/pkg/volume/iscsi/iscsi.go index 587b9e3865d..05fdccd380d 100644 --- a/pkg/volume/iscsi/iscsi.go +++ b/pkg/volume/iscsi/iscsi.go @@ -183,6 +183,10 @@ type iscsiDiskCleaner struct { var _ volume.Cleaner = &iscsiDiskCleaner{} +func (b *iscsiDiskBuilder) IsReadOnly() bool { + return b.readOnly +} + // Unmounts the bind mount, and detaches the disk only if the disk // resource was the last reference to that disk on the kubelet. 
func (c *iscsiDiskCleaner) TearDown() error { diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index 8776200b268..54151817dbd 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -188,6 +188,15 @@ type nfsCleaner struct { *nfs } +func (nfsVolume *nfs) IsReadOnly() bool { + return nfsVolume.readOnly +} + +func (nfsVolume *nfs) GetPath() string { + name := nfsPluginName + return nfsVolume.plugin.host.GetPodVolumeDir(nfsVolume.pod.UID, util.EscapeQualifiedNameForDisk(name), nfsVolume.volName) +} + var _ volume.Cleaner = &nfsCleaner{} func (c *nfsCleaner) TearDown() error { diff --git a/pkg/volume/persistent_claim/persistent_claim.go b/pkg/volume/persistent_claim/persistent_claim.go index 737f6b8443b..55d48ac4be5 100644 --- a/pkg/volume/persistent_claim/persistent_claim.go +++ b/pkg/volume/persistent_claim/persistent_claim.go @@ -26,11 +26,12 @@ import ( ) func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&persistentClaimPlugin{nil}} + return []volume.VolumePlugin{&persistentClaimPlugin{host: nil}} } type persistentClaimPlugin struct { - host volume.VolumeHost + host volume.VolumeHost + readOnly bool } var _ volume.VolumePlugin = &persistentClaimPlugin{} @@ -52,6 +53,7 @@ func (plugin *persistentClaimPlugin) CanSupport(spec *volume.Spec) bool { } func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) { + plugin.readOnly = spec.ReadOnly claim, err := plugin.host.GetKubeClient().PersistentVolumeClaims(pod.Namespace).Get(spec.VolumeSource.PersistentVolumeClaim.ClaimName) if err != nil { glog.Errorf("Error finding claim: %+v\n", spec.VolumeSource.PersistentVolumeClaim.ClaimName) @@ -87,6 +89,10 @@ func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, return builder, nil } +func (plugin *persistentClaimPlugin) IsReadOnly() bool { + return plugin.readOnly +} + func (plugin *persistentClaimPlugin) NewCleaner(_ string, _ types.UID, _ mount.Interface) (volume.Cleaner, error) { return nil, fmt.Errorf("This will never be called directly. The PV backing this claim has a cleaner. Kubelet uses that cleaner, not this one, when removing orphaned volumes.") } diff --git a/pkg/volume/rbd/rbd.go b/pkg/volume/rbd/rbd.go index e5eac04c82f..e3a32967140 100644 --- a/pkg/volume/rbd/rbd.go +++ b/pkg/volume/rbd/rbd.go @@ -216,6 +216,10 @@ type rbdCleaner struct { var _ volume.Cleaner = &rbdCleaner{} +func (b *rbd) IsReadOnly() bool { + return b.ReadOnly +} + // Unmounts the bind mount, and detaches the disk only if the disk // resource was the last reference to that disk on the kubelet. 
func (c *rbdCleaner) TearDown() error { diff --git a/pkg/volume/secret/secret.go b/pkg/volume/secret/secret.go index 26a1b0eba94..83a4fff64d3 100644 --- a/pkg/volume/secret/secret.go +++ b/pkg/volume/secret/secret.go @@ -168,6 +168,10 @@ func (b *secretVolumeBuilder) SetUpAt(dir string) error { return nil } +func (sv *secretVolume) IsReadOnly() bool { + return false +} + func totalSecretBytes(secret *api.Secret) int { totalSize := 0 for _, bytes := range secret.Data { diff --git a/pkg/volume/testing.go b/pkg/volume/testing.go index 3ac6d9736e9..fa2f532ae55 100644 --- a/pkg/volume/testing.go +++ b/pkg/volume/testing.go @@ -127,6 +127,10 @@ func (fv *FakeVolume) SetUpAt(dir string) error { return os.MkdirAll(dir, 0750) } +func (fv *FakeVolume) IsReadOnly() bool { + return false +} + func (fv *FakeVolume) GetPath() string { return path.Join(fv.Plugin.Host.GetPodVolumeDir(fv.PodUID, util.EscapeQualifiedNameForDisk(fv.Plugin.PluginName), fv.VolName)) } diff --git a/pkg/volume/volume.go b/pkg/volume/volume.go index ee668400a87..ff2bf29b944 100644 --- a/pkg/volume/volume.go +++ b/pkg/volume/volume.go @@ -41,6 +41,9 @@ type Builder interface { // directory path, which may or may not exist yet. This may be called // more than once, so implementations must be idempotent. SetUpAt(dir string) error + // IsReadOnly is a flag that gives the builder's ReadOnly attribute. + // All persistent volumes have a private readOnly flag in their builders. + IsReadOnly() bool } // Cleaner interface provides methods to cleanup/unmount the volumes. diff --git a/pkg/volumeclaimbinder/persistent_volume_recycler.go b/pkg/volumeclaimbinder/persistent_volume_recycler.go index c1b6bff0f22..a65131aa8b7 100644 --- a/pkg/volumeclaimbinder/persistent_volume_recycler.go +++ b/pkg/volumeclaimbinder/persistent_volume_recycler.go @@ -123,7 +123,7 @@ func (recycler *PersistentVolumeRecycler) handleRecycle(pv *api.PersistentVolume currentPhase := pv.Status.Phase nextPhase := currentPhase - spec := volume.NewSpecFromPersistentVolume(pv) + spec := volume.NewSpecFromPersistentVolume(pv, false) plugin, err := recycler.pluginMgr.FindRecyclablePluginBySpec(spec) if err != nil { return fmt.Errorf("Could not find recyclable volume plugin for spec: %+v", err) From 63ccfa2bebffb5a51a6fa5c7562ea72df0e4b359 Mon Sep 17 00:00:00 2001 From: markturansky Date: Wed, 1 Jul 2015 10:50:39 -0400 Subject: [PATCH 19/49] Added unit tests for each PV using IsReadOnly --- pkg/volume/aws_ebs/aws_ebs_test.go | 8 +-- pkg/volume/gce_pd/gce_pd_test.go | 49 +++++++++++++++ pkg/volume/glusterfs/glusterfs_test.go | 62 ++++++++++++++++++- pkg/volume/host_path/host_path.go | 15 +++-- pkg/volume/host_path/host_path_test.go | 49 +++++++++++++++ pkg/volume/iscsi/iscsi_test.go | 56 ++++++++++++++++- pkg/volume/nfs/nfs.go | 7 ++- pkg/volume/nfs/nfs_test.go | 51 ++++++++++++++- .../persistent_claim/persistent_claim.go | 1 - pkg/volume/rbd/rbd_test.go | 55 +++++++++++++++- 10 files changed, 338 insertions(+), 15 deletions(-) diff --git a/pkg/volume/aws_ebs/aws_ebs_test.go b/pkg/volume/aws_ebs/aws_ebs_test.go index 2e7727c6919..97bbc9b6b0b 100644 --- a/pkg/volume/aws_ebs/aws_ebs_test.go +++ b/pkg/volume/aws_ebs/aws_ebs_test.go @@ -196,13 +196,13 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName) - spec := volume.NewSpecFromPersistentVolume(pv, false) + // readOnly bool is 
supplied by persistent-claim volume source when its builder creates other volumes + spec := volume.NewSpecFromPersistentVolume(pv, true) pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) - if builder.IsReadOnly() { - t.Errorf("Expected false for builder.IsReadOnly") + if !builder.IsReadOnly() { + t.Errorf("Expected true for builder.IsReadOnly") } - } diff --git a/pkg/volume/gce_pd/gce_pd_test.go b/pkg/volume/gce_pd/gce_pd_test.go index cf7c89e5097..c95b841ec77 100644 --- a/pkg/volume/gce_pd/gce_pd_test.go +++ b/pkg/volume/gce_pd/gce_pd_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" @@ -171,3 +173,50 @@ func TestPlugin(t *testing.T) { t.Errorf("Detach watch not called") } } + +func TestPersistentClaimReadOnlyFlag(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pvA", + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{}, + }, + ClaimRef: &api.ObjectReference{ + Name: "claimA", + }, + }, + } + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claimA", + Namespace: "nsA", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "pvA", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + + o := testclient.NewObjects(api.Scheme, api.Scheme) + o.Add(pv) + o.Add(claim) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) + plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName) + + // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes + spec := volume.NewSpecFromPersistentVolume(pv, true) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) + + if !builder.IsReadOnly() { + t.Errorf("Expected true for builder.IsReadOnly") + } +} diff --git a/pkg/volume/glusterfs/glusterfs_test.go b/pkg/volume/glusterfs/glusterfs_test.go index 57baf671894..6961e09ab79 100644 --- a/pkg/volume/glusterfs/glusterfs_test.go +++ b/pkg/volume/glusterfs/glusterfs_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" @@ -153,5 +155,63 @@ func TestPluginPersistentVolume(t *testing.T) { }, } - doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol)) + doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) +} + +func TestPersistentClaimReadOnlyFlag(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pvA", + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + Glusterfs: 
&api.GlusterfsVolumeSource{"ep", "vol", false},
+			},
+			ClaimRef: &api.ObjectReference{
+				Name: "claimA",
+			},
+		},
+	}
+
+	claim := &api.PersistentVolumeClaim{
+		ObjectMeta: api.ObjectMeta{
+			Name: "claimA",
+			Namespace: "nsA",
+		},
+		Spec: api.PersistentVolumeClaimSpec{
+			VolumeName: "pvA",
+		},
+		Status: api.PersistentVolumeClaimStatus{
+			Phase: api.ClaimBound,
+		},
+	}
+
+	ep := &api.Endpoints{
+		ObjectMeta: api.ObjectMeta{
+			Name: "ep",
+		},
+		Subsets: []api.EndpointSubset{{
+			Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
+			Ports: []api.EndpointPort{{"foo", 80, api.ProtocolTCP}},
+		}},
+	}
+
+	o := testclient.NewObjects(api.Scheme, api.Scheme)
+	o.Add(pv)
+	o.Add(claim)
+	o.Add(ep)
+	client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)}
+
+	plugMgr := volume.VolumePluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil))
+	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
+
+	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+	spec := volume.NewSpecFromPersistentVolume(pv, true)
+	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
+	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil)
+
+	if !builder.IsReadOnly() {
+		t.Errorf("Expected true for builder.IsReadOnly")
+	}
 }
diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go
index 3768740509c..2afd40325d8 100644
--- a/pkg/volume/host_path/host_path.go
+++ b/pkg/volume/host_path/host_path.go
@@ -71,9 +71,15 @@ func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode

 func (plugin *hostPathPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, _ mount.Interface) (volume.Builder, error) {
 	if spec.VolumeSource.HostPath != nil {
-		return &hostPathBuilder{&hostPath{spec.VolumeSource.HostPath.Path}}, nil
+		return &hostPathBuilder{
+			hostPath: spec.VolumeSource.HostPath,
+			readOnly: false,
+		}, nil
 	} else {
-		return &hostPathBuilder{&hostPath{spec.PersistentVolumeSource.HostPath.Path}}, nil
+		return &hostPathBuilder{
+			hostPath: spec.PersistentVolumeSource.HostPath,
+			readOnly: spec.ReadOnly,
+		}, nil
 	}
 }
@@ -96,7 +102,7 @@ func newRecycler(spec *volume.Spec, host volume.VolumeHost) (volume.Recycler, er

 // HostPath volumes represent a bare host file or directory mount.
 // The directory at the specified path will be directly exposed to the container.
type hostPath struct { - path string + path string } func (hp *hostPath) GetPath() string { @@ -105,6 +111,7 @@ func (hp *hostPath) GetPath() string { type hostPathBuilder struct { *hostPath + readOnly bool } var _ volume.Builder = &hostPathBuilder{} @@ -120,7 +127,7 @@ func (b *hostPathBuilder) SetUpAt(dir string) error { } func (b *hostPathBuilder) IsReadOnly() bool { - return false + return b.readOnly } func (b *hostPathBuilder) GetPath() string { diff --git a/pkg/volume/host_path/host_path_test.go b/pkg/volume/host_path/host_path_test.go index 0dfeeec89c4..0d4ffa217ae 100644 --- a/pkg/volume/host_path/host_path_test.go +++ b/pkg/volume/host_path/host_path_test.go @@ -20,6 +20,8 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" ) @@ -142,3 +144,50 @@ func TestPlugin(t *testing.T) { t.Errorf("Expected success, got: %v", err) } } + +func TestPersistentClaimReadOnlyFlag(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pvA", + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + HostPath: &api.HostPathVolumeSource{"foo"}, + }, + ClaimRef: &api.ObjectReference{ + Name: "claimA", + }, + }, + } + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claimA", + Namespace: "nsA", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "pvA", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + + o := testclient.NewObjects(api.Scheme, api.Scheme) + o.Add(pv) + o.Add(claim) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) + plug, _ := plugMgr.FindPluginByName(hostPathPluginName) + + // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes + spec := volume.NewSpecFromPersistentVolume(pv, true) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) + + if !builder.IsReadOnly() { + t.Errorf("Expected true for builder.IsReadOnly") + } +} diff --git a/pkg/volume/iscsi/iscsi_test.go b/pkg/volume/iscsi/iscsi_test.go index 5a66dc265f1..ce8b0f5d6c9 100644 --- a/pkg/volume/iscsi/iscsi_test.go +++ b/pkg/volume/iscsi/iscsi_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" @@ -193,5 +195,57 @@ func TestPluginPersistentVolume(t *testing.T) { }, }, } - doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol)) + doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) +} + +func TestPersistentClaimReadOnlyFlag(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pvA", + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + ISCSI: &api.ISCSIVolumeSource{ + TargetPortal: "127.0.0.1:3260", + IQN: 
"iqn.2014-12.server:storage.target01", + FSType: "ext4", + Lun: 0, + }, + }, + ClaimRef: &api.ObjectReference{ + Name: "claimA", + }, + }, + } + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claimA", + Namespace: "nsA", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "pvA", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + + o := testclient.NewObjects(api.Scheme, api.Scheme) + o.Add(pv) + o.Add(claim) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) + plug, _ := plugMgr.FindPluginByName(iscsiPluginName) + + // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes + spec := volume.NewSpecFromPersistentVolume(pv, true) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) + + if !builder.IsReadOnly() { + t.Errorf("Expected true for builder.IsReadOnly") + } } diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index 54151817dbd..f9544b72962 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -76,11 +76,13 @@ func (plugin *nfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.Vo func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Builder, error) { var source *api.NFSVolumeSource - + var readOnly bool if spec.VolumeSource.NFS != nil { source = spec.VolumeSource.NFS + readOnly = spec.VolumeSource.NFS.ReadOnly } else { source = spec.PersistentVolumeSource.NFS + readOnly = spec.ReadOnly } return &nfsBuilder{ nfs: &nfs{ @@ -91,7 +93,8 @@ func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mou }, server: source.Server, exportPath: source.Path, - readOnly: source.ReadOnly}, nil + readOnly: readOnly, + }, nil } func (plugin *nfsPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) { diff --git a/pkg/volume/nfs/nfs_test.go b/pkg/volume/nfs/nfs_test.go index 837a75660f2..f13575ca9ae 100644 --- a/pkg/volume/nfs/nfs_test.go +++ b/pkg/volume/nfs/nfs_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" @@ -199,5 +201,52 @@ func TestPluginPersistentVolume(t *testing.T) { }, } - doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol)) + doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) +} + +func TestPersistentClaimReadOnlyFlag(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pvA", + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + NFS: &api.NFSVolumeSource{}, + }, + ClaimRef: &api.ObjectReference{ + Name: "claimA", + }, + }, + } + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claimA", + Namespace: "nsA", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "pvA", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + + o := testclient.NewObjects(api.Scheme, api.Scheme) + 
o.Add(pv) + o.Add(claim) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) + plug, _ := plugMgr.FindPluginByName(nfsPluginName) + + // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes + spec := volume.NewSpecFromPersistentVolume(pv, true) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{}, nil) + + if !builder.IsReadOnly() { + t.Errorf("Expected true for builder.IsReadOnly") + } } diff --git a/pkg/volume/persistent_claim/persistent_claim.go b/pkg/volume/persistent_claim/persistent_claim.go index 55d48ac4be5..7a69cdc3a3c 100644 --- a/pkg/volume/persistent_claim/persistent_claim.go +++ b/pkg/volume/persistent_claim/persistent_claim.go @@ -53,7 +53,6 @@ func (plugin *persistentClaimPlugin) CanSupport(spec *volume.Spec) bool { } func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) { - plugin.readOnly = spec.ReadOnly claim, err := plugin.host.GetKubeClient().PersistentVolumeClaims(pod.Namespace).Get(spec.VolumeSource.PersistentVolumeClaim.ClaimName) if err != nil { glog.Errorf("Error finding claim: %+v\n", spec.VolumeSource.PersistentVolumeClaim.ClaimName) diff --git a/pkg/volume/rbd/rbd_test.go b/pkg/volume/rbd/rbd_test.go index 94d3aa1c5ec..507972288a7 100644 --- a/pkg/volume/rbd/rbd_test.go +++ b/pkg/volume/rbd/rbd_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" @@ -151,5 +153,56 @@ func TestPluginPersistentVolume(t *testing.T) { }, } - doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol)) + doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false)) +} + +func TestPersistentClaimReadOnlyFlag(t *testing.T) { + pv := &api.PersistentVolume{ + ObjectMeta: api.ObjectMeta{ + Name: "pvA", + }, + Spec: api.PersistentVolumeSpec{ + PersistentVolumeSource: api.PersistentVolumeSource{ + RBD: &api.RBDVolumeSource{ + CephMonitors: []string{"a", "b"}, + RBDImage: "bar", + FSType: "ext4", + }, + }, + ClaimRef: &api.ObjectReference{ + Name: "claimA", + }, + }, + } + + claim := &api.PersistentVolumeClaim{ + ObjectMeta: api.ObjectMeta{ + Name: "claimA", + Namespace: "nsA", + }, + Spec: api.PersistentVolumeClaimSpec{ + VolumeName: "pvA", + }, + Status: api.PersistentVolumeClaimStatus{ + Phase: api.ClaimBound, + }, + } + + o := testclient.NewObjects(api.Scheme, api.Scheme) + o.Add(pv) + o.Add(claim) + client := &testclient.Fake{ReactFn: testclient.ObjectReaction(o, latest.RESTMapper)} + + plugMgr := volume.VolumePluginMgr{} + plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", client, nil)) + plug, _ := plugMgr.FindPluginByName(rbdPluginName) + + // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes + spec := volume.NewSpecFromPersistentVolume(pv, true) + pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + builder, _ := plug.NewBuilder(spec, pod, 
volume.VolumeOptions{}, nil) + + if !builder.IsReadOnly() { + t.Errorf("Expected true for builder.IsReadOnly") + } } From 920cb34b1e37cd52d17f3eeccda14298f0a627e5 Mon Sep 17 00:00:00 2001 From: markturansky Date: Fri, 24 Jul 2015 15:04:03 -0400 Subject: [PATCH 20/49] rebased and updated to latest --- pkg/volume/glusterfs/glusterfs.go | 7 +++---- pkg/volume/nfs/nfs.go | 23 ++++++++++++----------- pkg/volume/rbd/disk_manager.go | 4 ++++ pkg/volume/rbd/rbd_util.go | 8 ++++++++ 4 files changed, 27 insertions(+), 15 deletions(-) diff --git a/pkg/volume/glusterfs/glusterfs.go b/pkg/volume/glusterfs/glusterfs.go index 3d9626c66d9..70a20c7b584 100644 --- a/pkg/volume/glusterfs/glusterfs.go +++ b/pkg/volume/glusterfs/glusterfs.go @@ -129,7 +129,6 @@ type glusterfsBuilder struct { hosts *api.Endpoints path string readOnly bool - mounter mount.Interface exe exec.Interface } @@ -162,8 +161,8 @@ func (b *glusterfsBuilder) SetUpAt(dir string) error { return err } -func (glusterfsVolume *glusterfs) IsReadOnly() bool { - return glusterfsVolume.readOnly +func (b *glusterfsBuilder) IsReadOnly() bool { + return b.readOnly } func (glusterfsVolume *glusterfs) GetPath() string { @@ -217,7 +216,7 @@ func (b *glusterfsBuilder) setUpAtInternal(dir string) error { var errs error options := []string{} - if glusterfsVolume.readOnly { + if b.readOnly { options = append(options, "ro") } diff --git a/pkg/volume/nfs/nfs.go b/pkg/volume/nfs/nfs.go index f9544b72962..47abedf68a0 100644 --- a/pkg/volume/nfs/nfs.go +++ b/pkg/volume/nfs/nfs.go @@ -187,21 +187,22 @@ func (b *nfsBuilder) SetUpAt(dir string) error { return nil } +func (b *nfsBuilder) IsReadOnly() bool { + return b.readOnly +} + +// +//func (c *nfsCleaner) GetPath() string { +// name := nfsPluginName +// return c.plugin.host.GetPodVolumeDir(c.pod.UID, util.EscapeQualifiedNameForDisk(name), c.volName) +//} + +var _ volume.Cleaner = &nfsCleaner{} + type nfsCleaner struct { *nfs } -func (nfsVolume *nfs) IsReadOnly() bool { - return nfsVolume.readOnly -} - -func (nfsVolume *nfs) GetPath() string { - name := nfsPluginName - return nfsVolume.plugin.host.GetPodVolumeDir(nfsVolume.pod.UID, util.EscapeQualifiedNameForDisk(name), nfsVolume.volName) -} - -var _ volume.Cleaner = &nfsCleaner{} - func (c *nfsCleaner) TearDown() error { return c.TearDownAt(c.GetPath()) } diff --git a/pkg/volume/rbd/disk_manager.go b/pkg/volume/rbd/disk_manager.go index 146a109e388..ec000e86ccd 100644 --- a/pkg/volume/rbd/disk_manager.go +++ b/pkg/volume/rbd/disk_manager.go @@ -62,7 +62,11 @@ func diskSetUp(manager diskManager, b rbdBuilder, volPath string, mounter mount. } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. 
options := []string{"bind"} +<<<<<<< HEAD if b.ReadOnly { +======= + if disk.readOnly { +>>>>>>> rebased and updated to latest options = append(options, "ro") } err = mounter.Mount(globalPDPath, volPath, "", options) diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index f40c358de0a..34d3eed769b 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -161,7 +161,11 @@ func (util *RBDUtil) loadRBD(rbd *rbd, mnt string) error { func (util *RBDUtil) fencing(b rbdBuilder) error { // no need to fence readOnly +<<<<<<< HEAD if b.ReadOnly { +======= + if rbd.readOnly { +>>>>>>> rebased and updated to latest return nil } return util.rbdLock(b, true) @@ -169,7 +173,11 @@ func (util *RBDUtil) fencing(b rbdBuilder) error { func (util *RBDUtil) defencing(c rbdCleaner) error { // no need to fence readOnly +<<<<<<< HEAD if c.ReadOnly { +======= + if rbd.readOnly { +>>>>>>> rebased and updated to latest return nil } From 8639f24374bd7648505a175c80943503b6088d64 Mon Sep 17 00:00:00 2001 From: markturansky Date: Mon, 27 Jul 2015 16:33:22 -0400 Subject: [PATCH 21/49] rebased and updated --- pkg/volume/gce_pd/gce_pd.go | 4 ++-- pkg/volume/rbd/disk_manager.go | 6 +----- pkg/volume/rbd/rbd_util.go | 10 +--------- 3 files changed, 4 insertions(+), 16 deletions(-) diff --git a/pkg/volume/gce_pd/gce_pd.go b/pkg/volume/gce_pd/gce_pd.go index 8969893592a..8ef2dbeb30d 100644 --- a/pkg/volume/gce_pd/gce_pd.go +++ b/pkg/volume/gce_pd/gce_pd.go @@ -229,8 +229,8 @@ func (b *gcePersistentDiskBuilder) SetUpAt(dir string) error { return nil } -func (pd *gcePersistentDisk) IsReadOnly() bool { - return pd.readOnly +func (b *gcePersistentDiskBuilder) IsReadOnly() bool { + return b.readOnly } func makeGlobalPDName(host volume.VolumeHost, devName string) string { diff --git a/pkg/volume/rbd/disk_manager.go b/pkg/volume/rbd/disk_manager.go index ec000e86ccd..d376957ae73 100644 --- a/pkg/volume/rbd/disk_manager.go +++ b/pkg/volume/rbd/disk_manager.go @@ -62,11 +62,7 @@ func diskSetUp(manager diskManager, b rbdBuilder, volPath string, mounter mount. } // Perform a bind mount to the full path to allow duplicate mounts of the same disk. 
options := []string{"bind"} -<<<<<<< HEAD - if b.ReadOnly { -======= - if disk.readOnly { ->>>>>>> rebased and updated to latest + if b.IsReadOnly() { options = append(options, "ro") } err = mounter.Mount(globalPDPath, volPath, "", options) diff --git a/pkg/volume/rbd/rbd_util.go b/pkg/volume/rbd/rbd_util.go index 34d3eed769b..545a4b2abd9 100644 --- a/pkg/volume/rbd/rbd_util.go +++ b/pkg/volume/rbd/rbd_util.go @@ -161,11 +161,7 @@ func (util *RBDUtil) loadRBD(rbd *rbd, mnt string) error { func (util *RBDUtil) fencing(b rbdBuilder) error { // no need to fence readOnly -<<<<<<< HEAD - if b.ReadOnly { -======= - if rbd.readOnly { ->>>>>>> rebased and updated to latest + if b.IsReadOnly() { return nil } return util.rbdLock(b, true) @@ -173,11 +169,7 @@ func (util *RBDUtil) fencing(b rbdBuilder) error { func (util *RBDUtil) defencing(c rbdCleaner) error { // no need to fence readOnly -<<<<<<< HEAD if c.ReadOnly { -======= - if rbd.readOnly { ->>>>>>> rebased and updated to latest return nil } From e8289ceb9da6aec391438a31d9ace0285f755f0a Mon Sep 17 00:00:00 2001 From: markturansky Date: Wed, 29 Jul 2015 14:48:06 -0400 Subject: [PATCH 22/49] rebased and updated --- pkg/volume/git_repo/git_repo.go | 2 +- pkg/volume/host_path/host_path.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/volume/git_repo/git_repo.go b/pkg/volume/git_repo/git_repo.go index 135110b8aa8..de431a5ce92 100644 --- a/pkg/volume/git_repo/git_repo.go +++ b/pkg/volume/git_repo/git_repo.go @@ -118,7 +118,7 @@ func (b *gitRepoVolumeBuilder) SetUp() error { return b.SetUpAt(b.GetPath()) } -func (gr *gitRepo) IsReadOnly() bool { +func (b *gitRepoVolumeBuilder) IsReadOnly() bool { return false } diff --git a/pkg/volume/host_path/host_path.go b/pkg/volume/host_path/host_path.go index 2afd40325d8..7e5118f1d4b 100644 --- a/pkg/volume/host_path/host_path.go +++ b/pkg/volume/host_path/host_path.go @@ -72,12 +72,12 @@ func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode func (plugin *hostPathPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, _ mount.Interface) (volume.Builder, error) { if spec.VolumeSource.HostPath != nil { return &hostPathBuilder{ - hostPath: spec.VolumeSource.HostPath, + hostPath: &hostPath{path: spec.VolumeSource.HostPath.Path}, readOnly: false, }, nil } else { return &hostPathBuilder{ - hostPath: spec.PersistentVolumeSource.HostPath, + hostPath: &hostPath{path: spec.PersistentVolumeSource.HostPath.Path}, readOnly: spec.ReadOnly, }, nil } @@ -102,7 +102,7 @@ func newRecycler(spec *volume.Spec, host volume.VolumeHost) (volume.Recycler, er // HostPath volumes represent a bare host file or directory mount. // The direct at the specified path will be directly exposed to the container. type hostPath struct { - path string + path string } func (hp *hostPath) GetPath() string { From 55e4941c5e3b55fc74c6b2091d3835fdcd976193 Mon Sep 17 00:00:00 2001 From: Joe Beda Date: Wed, 29 Jul 2015 13:10:37 -0700 Subject: [PATCH 23/49] Fix up memory requirements for release w/ boot2docker. --- hack/lib/golang.sh | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh index 367ef9fa59f..839498c5a16 100644 --- a/hack/lib/golang.sh +++ b/hack/lib/golang.sh @@ -104,10 +104,16 @@ readonly KUBE_CLIENT_PLATFORMS=( windows/amd64 ) -# Gigabytes desired for parallel platform builds. 8 is fairly +# Gigabytes desired for parallel platform builds. 
11 is fairly # arbitrary, but is a reasonable splitting point for 2015 # laptops-versus-not. -readonly KUBE_PARALLEL_BUILD_MEMORY=8 +# +# If you are using boot2docker, the following seems to work (note +# that 12000 rounds to 11G): +# boot2docker down +# VBoxManage modifyvm boot2docker-vm --memory 12000 +# boot2docker up +readonly KUBE_PARALLEL_BUILD_MEMORY=11 readonly KUBE_ALL_TARGETS=( "${KUBE_SERVER_TARGETS[@]}" From f64d89fd1d0c81eb1e3e6ea3a6b2ca6571a65c4b Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Wed, 29 Jul 2015 22:21:10 +0200 Subject: [PATCH 24/49] fixed documentations after running gendocs.sh --- examples/spark/README.md | 2 ++ examples/spark/images/driver/README.md | 37 ++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/examples/spark/README.md b/examples/spark/README.md index b204fce0dd6..cefc2762b37 100644 --- a/examples/spark/README.md +++ b/examples/spark/README.md @@ -152,6 +152,7 @@ The Spark driver is used to launch jobs into Spark cluster. You can read more ab ```shell $ kubectl create -f examples/spark/spark-driver.json ``` + The Spark driver needs the Master service to be running. ### Check to see if the driver is running @@ -208,6 +209,7 @@ Make sure the Master Pod is running (use: ```kubectl get pods```). ```kubectl create -f spark-driver.json``` + [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/README.md?pixel)]() diff --git a/examples/spark/images/driver/README.md b/examples/spark/images/driver/README.md index e69de29bb2d..2a36c4ee68e 100644 --- a/examples/spark/images/driver/README.md +++ b/examples/spark/images/driver/README.md @@ -0,0 +1,37 @@ + + + + +WARNING +WARNING +WARNING +WARNING +WARNING + +

+PLEASE NOTE: This document applies to the HEAD of the source tree

+ +If you are using a released version of Kubernetes, you should +refer to the docs that go with that version. + + +The latest 1.0.x release of this document can be found +[here](http://releases.k8s.io/release-1.0/examples/spark/images/driver/README.md). + +Documentation for other releases can be found at +[releases.k8s.io](http://releases.k8s.io). + +-- + + + + + + + +[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/spark/images/driver/README.md?pixel)]() + From 132575bcf8fc45bd095d21ef987a3a351465eb83 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Tue, 28 Jul 2015 16:35:56 -0700 Subject: [PATCH 25/49] Update single node docker to 1.0.1 --- cluster/images/hyperkube/Makefile | 2 +- docs/getting-started-guides/docker.md | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/cluster/images/hyperkube/Makefile b/cluster/images/hyperkube/Makefile index a7c64fbcc4c..47304fc03a9 100644 --- a/cluster/images/hyperkube/Makefile +++ b/cluster/images/hyperkube/Makefile @@ -1,6 +1,6 @@ # build the hyperkube image. -VERSION=v0.18.2 +VERSION=v1.0.1 all: cp ../../saltbase/salt/helpers/safe_format_and_mount . diff --git a/docs/getting-started-guides/docker.md b/docs/getting-started-guides/docker.md index 48660c7ab64..07aac0a461a 100644 --- a/docs/getting-started-guides/docker.md +++ b/docs/getting-started-guides/docker.md @@ -59,13 +59,13 @@ Here's a diagram of what the final result will look like: ### Step One: Run etcd ```sh -docker run --net=host -d gcr.io/google_containers/etcd:2.0.9 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data +docker run --net=host -d gcr.io/google_containers/etcd:2.0.12 /usr/local/bin/etcd --addr=127.0.0.1:4001 --bind-addr=0.0.0.0:4001 --data-dir=/var/etcd/data ``` ### Step Two: Run the master ```sh -docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v0.21.2 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests +docker run --net=host -d -v /var/run/docker.sock:/var/run/docker.sock gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube kubelet --api_servers=http://localhost:8080 --v=2 --address=0.0.0.0 --enable_server --hostname_override=127.0.0.1 --config=/etc/kubernetes/manifests ``` This actually runs the kubelet, which in turn runs a [pod](../user-guide/pods.md) that contains the other master components. @@ -75,15 +75,15 @@ This actually runs the kubelet, which in turn runs a [pod](../user-guide/pods.md *Note, this could be combined with master above, but it requires --privileged for iptables manipulation* ```sh -docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v0.21.2 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 +docker run -d --net=host --privileged gcr.io/google_containers/hyperkube:v1.0.1 /hyperkube proxy --master=http://127.0.0.1:8080 --v=2 ``` ### Test it out At this point you should have a running Kubernetes cluster. 
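Before fetching kubectl, a quick sanity check is to list the bootstrap containers (a sketch, not part of the official guide; exact container names vary with the Docker and hyperkube versions):

```sh
# Each `docker run` command above should have left a container running; the
# kubelet container additionally launches the master components (apiserver,
# scheduler, controller-manager) as a pod, so expect several hyperkube entries.
docker ps | grep -E 'etcd|hyperkube'
```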
You can test this by downloading the kubectl binary -([OS X](https://storage.googleapis.com/kubernetes-release/release/v0.18.2/bin/darwin/amd64/kubectl)) -([linux](https://storage.googleapis.com/kubernetes-release/release/v0.18.2/bin/linux/amd64/kubectl)) +([OS X](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/darwin/amd64/kubectl)) +([linux](https://storage.googleapis.com/kubernetes-release/release/v1.0.1/bin/linux/amd64/kubectl)) *Note:* On OS/X you will need to set up port forwarding via ssh: @@ -110,7 +110,7 @@ If you are running different Kubernetes clusters, you may need to specify `-s ht ### Run an application ```sh -kubectl -s http://localhost:8080 run-container nginx --image=nginx --port=80 +kubectl -s http://localhost:8080 run nginx --image=nginx --port=80 ``` now run `docker ps` you should see nginx running. You may need to wait a few minutes for the image to get pulled. @@ -125,7 +125,7 @@ This should print: ```console NAME LABELS SELECTOR IP PORT(S) -nginx run=nginx 80/TCP +nginx run=nginx run=nginx 80/TCP ``` If ip-addr is blank run the following command to obtain it. Know issue #10836 From 4d65ff1192a518c636cee19cff767c6ee9c55d20 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Wed, 29 Jul 2015 13:38:28 -0700 Subject: [PATCH 26/49] Update multinode instructions --- docs/getting-started-guides/docker-multinode.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/getting-started-guides/docker-multinode.md b/docs/getting-started-guides/docker-multinode.md index af7aba677ce..3cd368a0ead 100644 --- a/docs/getting-started-guides/docker-multinode.md +++ b/docs/getting-started-guides/docker-multinode.md @@ -39,7 +39,7 @@ interested in just starting to explore Kubernetes, we recommend that you start t _Note_: There is a [bug](https://github.com/docker/docker/issues/14106) in Docker 1.7.0 that prevents this from working correctly. -Please install Docker 1.6.2 or wait for Docker 1.7.1. +Please install Docker 1.6.2 or Docker 1.7.1. **Table of Contents** @@ -83,7 +83,7 @@ The first step in the process is to initialize the master node. 
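Given the Docker 1.7.0 bug called out above, it is worth confirming the Docker version on each machine before bootstrapping (a minimal sketch; the output format differs between Docker releases):

```sh
# This guide is known to work with Docker 1.6.2 and 1.7.1; 1.7.0 has a bug
# that prevents the multinode setup from working correctly.
docker --version
```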
Clone the Kubernetes repo, and run [master.sh](docker-multinode/master.sh) on the master machine with root: ```sh -export K8S_VERSION= +export K8S_VERSION= cd kubernetes/cluster/docker-multinode ./master.sh ``` @@ -99,7 +99,8 @@ Once your master is up and running you can add one or more workers on different Clone the Kubernetes repo, and run [worker.sh](docker-multinode/worker.sh) on the worker machine with root: ```sh -export K8S_VERSION= MASTER_IP= +export K8S_VERSION= +export MASTER_IP= cd kubernetes/cluster/docker-multinode ./worker.sh ``` From 99b02bfe737dec5c826f6de4aa8e924f21145119 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Tue, 28 Jul 2015 22:00:15 -0700 Subject: [PATCH 27/49] Add optional throttling to the proxy/exec/attach methods --- cmd/kube-apiserver/app/server.go | 5 ++++- cmd/kubelet/app/server.go | 2 +- pkg/capabilities/capabilities.go | 10 +++++++--- pkg/registry/generic/rest/proxy.go | 18 ++++++++++++++++-- pkg/registry/pod/etcd/etcd.go | 13 ++++++++++--- 5 files changed, 38 insertions(+), 10 deletions(-) diff --git a/cmd/kube-apiserver/app/server.go b/cmd/kube-apiserver/app/server.go index a9bec718601..b9c811d82ca 100644 --- a/cmd/kube-apiserver/app/server.go +++ b/cmd/kube-apiserver/app/server.go @@ -101,6 +101,7 @@ type APIServer struct { LongRunningRequestRE string SSHUser string SSHKeyfile string + MaxConnectionBytesPerSec int64 } // NewAPIServer creates a new APIServer object with default parameters @@ -205,6 +206,7 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.LongRunningRequestRE, "long-running-request-regexp", defaultLongRunningRequestRE, "A regular expression matching long running requests which should be excluded from maximum inflight request handling.") fs.StringVar(&s.SSHUser, "ssh-user", "", "If non-empty, use secure SSH proxy to the nodes, using this user name") fs.StringVar(&s.SSHKeyfile, "ssh-keyfile", "", "If non-empty, use secure SSH proxy to the nodes, using this user keyfile") + fs.Int64Var(&s.MaxConnectionBytesPerSec, "max-connection-bytes-per-sec", 0, "If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests") } // TODO: Longer term we should read this from some config store, rather than a flag. @@ -255,7 +257,8 @@ func (s *APIServer) Run(_ []string) error { capabilities.Initialize(capabilities.Capabilities{ AllowPrivileged: s.AllowPrivileged, // TODO(vmarmol): Implement support for HostNetworkSources. 
- HostNetworkSources: []string{}, + HostNetworkSources: []string{}, + PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec, }) cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 71a48c91c52..16dcf4d28fc 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -604,7 +604,7 @@ func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error { } else { glog.Warning("No api server defined - no events will be sent to API server.") } - capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources) + capabilities.Setup(kcfg.AllowPrivileged, kcfg.HostNetworkSources, 0) credentialprovider.SetPreferredDockercfgPath(kcfg.RootDirectory) diff --git a/pkg/capabilities/capabilities.go b/pkg/capabilities/capabilities.go index d105f1d6608..7a1281447c5 100644 --- a/pkg/capabilities/capabilities.go +++ b/pkg/capabilities/capabilities.go @@ -27,6 +27,9 @@ type Capabilities struct { // List of pod sources for which using host network is allowed. HostNetworkSources []string + + // PerConnectionBandwidthLimitBytesPerSec limits the throughput of each connection (currently only used for proxy, exec, attach) + PerConnectionBandwidthLimitBytesPerSec int64 } // TODO: Clean these up into a singleton @@ -43,10 +46,11 @@ func Initialize(c Capabilities) { } // Setup the capability set. It wraps Initialize for improving usibility. -func Setup(allowPrivileged bool, hostNetworkSources []string) { +func Setup(allowPrivileged bool, hostNetworkSources []string, perConnectionBytesPerSec int64) { Initialize(Capabilities{ - AllowPrivileged: allowPrivileged, - HostNetworkSources: hostNetworkSources, + AllowPrivileged: allowPrivileged, + HostNetworkSources: hostNetworkSources, + PerConnectionBandwidthLimitBytesPerSec: perConnectionBytesPerSec, }) } diff --git a/pkg/registry/generic/rest/proxy.go b/pkg/registry/generic/rest/proxy.go index 5339c596f46..12d34b250c7 100644 --- a/pkg/registry/generic/rest/proxy.go +++ b/pkg/registry/generic/rest/proxy.go @@ -34,6 +34,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/third_party/golang/netutil" "github.com/golang/glog" + "github.com/mxk/go-flowrate/flowrate" ) // UpgradeAwareProxyHandler is a handler for proxy requests that may require an upgrade @@ -42,6 +43,7 @@ type UpgradeAwareProxyHandler struct { Location *url.URL Transport http.RoundTripper FlushInterval time.Duration + MaxBytesPerSec int64 err error } @@ -152,7 +154,13 @@ func (h *UpgradeAwareProxyHandler) tryUpgrade(w http.ResponseWriter, req *http.R wg.Add(2) go func() { - _, err := io.Copy(backendConn, requestHijackedConn) + var writer io.WriteCloser + if h.MaxBytesPerSec > 0 { + writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec) + } else { + writer = backendConn + } + _, err := io.Copy(writer, requestHijackedConn) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { glog.Errorf("Error proxying data from client to backend: %v", err) } @@ -160,7 +168,13 @@ func (h *UpgradeAwareProxyHandler) tryUpgrade(w http.ResponseWriter, req *http.R }() go func() { - _, err := io.Copy(requestHijackedConn, backendConn) + var reader io.ReadCloser + if h.MaxBytesPerSec > 0 { + reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec) + } else { + reader = backendConn + } + _, err := io.Copy(requestHijackedConn, reader) if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { glog.Errorf("Error proxying data from backend to 
client: %v", err) } diff --git a/pkg/registry/pod/etcd/etcd.go b/pkg/registry/pod/etcd/etcd.go index 74694f543af..bb7b4e2f716 100644 --- a/pkg/registry/pod/etcd/etcd.go +++ b/pkg/registry/pod/etcd/etcd.go @@ -26,6 +26,7 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors" etcderr "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/etcd" "github.com/GoogleCloudPlatform/kubernetes/pkg/api/rest" + "github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities" "github.com/GoogleCloudPlatform/kubernetes/pkg/client" "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" @@ -277,7 +278,7 @@ func (r *ProxyREST) Connect(ctx api.Context, id string, opts runtime.Object) (re return nil, err } location.Path = path.Join(location.Path, proxyOpts.Path) - return genericrest.NewUpgradeAwareProxyHandler(location, nil, false), nil + return newUpgradeAwareProxyHandler(location, nil, false), nil } // Support both GET and POST methods. Over time, we want to move all clients to start using POST and then stop supporting GET. @@ -307,7 +308,7 @@ func (r *ExecREST) Connect(ctx api.Context, name string, opts runtime.Object) (r if err != nil { return nil, err } - return genericrest.NewUpgradeAwareProxyHandler(location, transport, true), nil + return newUpgradeAwareProxyHandler(location, transport, true), nil } // NewConnectOptions returns the versioned object that represents exec parameters @@ -350,5 +351,11 @@ func (r *PortForwardREST) Connect(ctx api.Context, name string, opts runtime.Obj if err != nil { return nil, err } - return genericrest.NewUpgradeAwareProxyHandler(location, transport, true), nil + return newUpgradeAwareProxyHandler(location, transport, true), nil +} + +func newUpgradeAwareProxyHandler(location *url.URL, transport http.RoundTripper, upgradeRequired bool) *genericrest.UpgradeAwareProxyHandler { + handler := genericrest.NewUpgradeAwareProxyHandler(location, transport, upgradeRequired) + handler.MaxBytesPerSec = capabilities.Get().PerConnectionBandwidthLimitBytesPerSec + return handler } From 732647ea97033601a962455ae4fa2b883bc6a713 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 17 Jun 2015 15:48:27 -0400 Subject: [PATCH 28/49] Improve conversion to support multiple packages OpenShift uses multiple API packages (types are split) which Kube will also eventually have as we introduce more plugins. These changes make the generators able to handle importing different API object packages into a single generator function. --- cmd/genconversion/conversion.go | 13 +- cmd/gendeepcopy/deep_copy.go | 13 +- hack/update-generated-conversions.sh | 8 -- pkg/runtime/conversion_generator.go | 177 +++++++++++++++++++++---- pkg/runtime/deep_copy_generator.go | 188 +++++++++++++++++++++------ 5 files changed, 325 insertions(+), 74 deletions(-) diff --git a/cmd/genconversion/conversion.go b/cmd/genconversion/conversion.go index c25fb0d0aad..c60b8f1dacd 100644 --- a/cmd/genconversion/conversion.go +++ b/cmd/genconversion/conversion.go @@ -17,13 +17,16 @@ limitations under the License. 
package main import ( + "fmt" "io" "os" + "path" "runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -50,7 +53,9 @@ func main() { funcOut = file } - generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw()) + generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", *version)) + apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") + generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource") // TODO(wojtek-t): Change the overwrites to a flag. generator.OverwritePackage(*version, "") for _, knownType := range api.Scheme.KnownTypes(*version) { @@ -58,10 +63,14 @@ func main() { glog.Errorf("error while generating conversion functions for %v: %v", knownType, err) } } + generator.RepackImports(util.NewStringSet()) + if err := generator.WriteImports(funcOut); err != nil { + glog.Fatalf("error while writing imports: %v", err) + } if err := generator.WriteConversionFunctions(funcOut); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } - if err := generator.RegisterConversionFunctions(funcOut); err != nil { + if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } } diff --git a/cmd/gendeepcopy/deep_copy.go b/cmd/gendeepcopy/deep_copy.go index 59ac43bb72f..dcb1c33766d 100644 --- a/cmd/gendeepcopy/deep_copy.go +++ b/cmd/gendeepcopy/deep_copy.go @@ -19,12 +19,14 @@ package main import ( "io" "os" + "path" "runtime" "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -53,10 +55,14 @@ func main() { } knownVersion := *version + registerTo := "api.Scheme" if knownVersion == "api" { knownVersion = api.Scheme.Raw().InternalVersion + registerTo = "Scheme" } - generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw()) + pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion) + generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath, util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes")) + generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") for _, overwrite := range strings.Split(*overwrites, ",") { vals := strings.Split(overwrite, "=") @@ -67,13 +73,14 @@ func main() { glog.Errorf("error while generating deep copy functions for %v: %v", knownType, err) } } - if err := generator.WriteImports(funcOut, *version); err != nil { + generator.RepackImports() + if err := generator.WriteImports(funcOut); err != nil { glog.Fatalf("error while writing imports: %v", err) } if err := generator.WriteDeepCopyFunctions(funcOut); err != nil { glog.Fatalf("error while writing deep copy functions: %v", err) } - if err := generator.RegisterDeepCopyFunctions(funcOut, *version); err != nil { + if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil { glog.Fatalf("error while registering deep copy functions: %v", err) } } diff --git 
a/hack/update-generated-conversions.sh b/hack/update-generated-conversions.sh index d7b9f9fd591..7df31ca18f8 100755 --- a/hack/update-generated-conversions.sh +++ b/hack/update-generated-conversions.sh @@ -33,14 +33,6 @@ function generate_version() { cat >> $TMPFILE < 0 { + name = dirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { + name = subdirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + } + } + for i := 2; i < 100; i++ { + generatedName := fmt.Sprintf("%s%d", name, i) + if _, ok := g.shortImports[generatedName]; !ok { + g.imports[pkg] = generatedName + g.shortImports[generatedName] = pkg + return generatedName + } + } + panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) +} + func (g *conversionGenerator) typeName(inType reflect.Type) string { switch inType.Kind() { - case reflect.Map: - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) + case reflect.Map: + if len(inType.Name()) == 0 { + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) + } + fallthrough default: - typeWithPkg := fmt.Sprintf("%s", inType) - slices := strings.Split(typeWithPkg, ".") - if len(slices) == 1 { + pkg, name := inType.PkgPath(), inType.Name() + if len(name) == 0 && inType.Kind() == reflect.Struct { + return "struct{}" + } + if len(pkg) == 0 { // Default package. - return slices[0] + return name } - if len(slices) == 2 { - pkg := slices[0] - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if pkg != "" { - pkg = pkg + "." - } - return pkg + slices[1] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val } - panic("Incorrect type name: " + typeWithPkg) + if len(pkg) == 0 { + return name + } + short := g.addImportByPath(pkg) + if len(short) > 0 { + return fmt.Sprintf("%s.%s", short, name) + } + return name } } @@ -658,6 +785,10 @@ func (g *conversionGenerator) existsDedicatedConversionFunction(inType, outType // unnamed. Thus we return false here. return false } + // TODO: no way to handle private conversions in different packages + if g.assumePrivateConversions { + return false + } return g.scheme.Converter().HasConversionFunc(inType, outType) } diff --git a/pkg/runtime/deep_copy_generator.go b/pkg/runtime/deep_copy_generator.go index 7be7af6bd86..20d931caa0a 100644 --- a/pkg/runtime/deep_copy_generator.go +++ b/pkg/runtime/deep_copy_generator.go @@ -19,6 +19,7 @@ package runtime import ( "fmt" "io" + "path" "reflect" "sort" "strings" @@ -38,9 +39,20 @@ type DeepCopyGenerator interface { // functions for this type and all nested types will be generated. AddType(inType reflect.Type) error + // ReplaceType registers a type that should be used instead of the type + // with the provided pkgPath and name. + ReplaceType(pkgPath, name string, in interface{}) + + // AddImport registers a package name with the generator and returns its + // short name. + AddImport(pkgPath string) string + + // RepackImports creates a stable ordering of import short names + RepackImports() + // Writes all imports that are necessary for deep-copy function and // their registration. 
- WriteImports(w io.Writer, pkg string) error + WriteImports(w io.Writer) error // Writes deel-copy functions for all types added via AddType() method // and their nested types. @@ -57,20 +69,80 @@ type DeepCopyGenerator interface { OverwritePackage(pkg, overwrite string) } -func NewDeepCopyGenerator(scheme *conversion.Scheme) DeepCopyGenerator { - return &deepCopyGenerator{ +func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include util.StringSet) DeepCopyGenerator { + g := &deepCopyGenerator{ scheme: scheme, + targetPkg: targetPkg, copyables: make(map[reflect.Type]bool), - imports: util.StringSet{}, + imports: make(map[string]string), + shortImports: make(map[string]string), pkgOverwrites: make(map[string]string), + replace: make(map[pkgPathNamePair]reflect.Type), + include: include, } + g.targetPackage(targetPkg) + g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") + return g +} + +type pkgPathNamePair struct { + PkgPath string + Name string } type deepCopyGenerator struct { - scheme *conversion.Scheme - copyables map[reflect.Type]bool - imports util.StringSet + scheme *conversion.Scheme + targetPkg string + copyables map[reflect.Type]bool + // map of package names to shortname + imports map[string]string + // map of short names to package names + shortImports map[string]string pkgOverwrites map[string]string + replace map[pkgPathNamePair]reflect.Type + include util.StringSet +} + +func (g *deepCopyGenerator) addImportByPath(pkg string) string { + if name, ok := g.imports[pkg]; ok { + return name + } + name := path.Base(pkg) + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 { + name = dirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { + name = subdirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + } + } + for i := 2; i < 100; i++ { + generatedName := fmt.Sprintf("%s%d", name, i) + if _, ok := g.shortImports[generatedName]; !ok { + g.imports[pkg] = generatedName + g.shortImports[generatedName] = pkg + return generatedName + } + } + panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) +} + +func (g *deepCopyGenerator) targetPackage(pkg string) { + g.imports[pkg] = "" + g.shortImports[""] = pkg } func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { @@ -90,11 +162,18 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return err } case reflect.Interface: - g.imports.Insert(inType.PkgPath()) + g.addImportByPath(inType.PkgPath()) return nil case reflect.Struct: - g.imports.Insert(inType.PkgPath()) - if !strings.HasPrefix(inType.PkgPath(), "github.com/GoogleCloudPlatform/kubernetes") { + g.addImportByPath(inType.PkgPath()) + found := false + for s := range g.include { + if strings.HasPrefix(inType.PkgPath(), s) { + found = true + break + } + } + if !found { return nil } for i := 0; i < inType.NumField(); i++ { @@ -110,6 +189,15 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return nil } +func (g *deepCopyGenerator) AddImport(pkg string) string { + return g.addImportByPath(pkg) +} + +// ReplaceType registers a replacement type to be used instead of the named type +func (g 
*deepCopyGenerator) ReplaceType(pkgPath, name string, t interface{}) { + g.replace[pkgPathNamePair{pkgPath, name}] = reflect.TypeOf(t) +} + func (g *deepCopyGenerator) AddType(inType reflect.Type) error { if inType.Kind() != reflect.Struct { return fmt.Errorf("non-struct copies are not supported") @@ -117,10 +205,23 @@ func (g *deepCopyGenerator) AddType(inType reflect.Type) error { return g.addAllRecursiveTypes(inType) } -func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { +func (g *deepCopyGenerator) RepackImports() { + var packages []string + for key := range g.imports { + packages = append(packages, key) + } + sort.Strings(packages) + g.imports = make(map[string]string) + g.shortImports = make(map[string]string) + + g.targetPackage(g.targetPkg) + for _, pkg := range packages { + g.addImportByPath(pkg) + } +} + +func (g *deepCopyGenerator) WriteImports(w io.Writer) error { var packages []string - packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/api") - packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") for key := range g.imports { packages = append(packages, key) } @@ -130,10 +231,13 @@ func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { indent := 0 buffer.addLine("import (\n", indent) for _, importPkg := range packages { - if strings.HasSuffix(importPkg, pkg) { + if len(importPkg) == 0 { continue } - buffer.addLine(fmt.Sprintf("\"%s\"\n", importPkg), indent+1) + if len(g.imports[importPkg]) == 0 { + continue + } + buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1) } buffer.addLine(")\n", indent) buffer.addLine("\n", indent) @@ -159,35 +263,47 @@ func (s byPkgAndName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (g *deepCopyGenerator) typeName(inType reflect.Type) string { +func (g *deepCopyGenerator) nameForType(inType reflect.Type) string { switch inType.Kind() { - case reflect.Map: - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) + case reflect.Map: + if len(inType.Name()) == 0 { + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) + } + fallthrough default: - typeWithPkg := fmt.Sprintf("%s", inType) - slices := strings.Split(typeWithPkg, ".") - if len(slices) == 1 { + pkg, name := inType.PkgPath(), inType.Name() + if len(name) == 0 && inType.Kind() == reflect.Struct { + return "struct{}" + } + if len(pkg) == 0 { // Default package. - return slices[0] + return name } - if len(slices) == 2 { - pkg := slices[0] - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if pkg != "" { - pkg = pkg + "." 
- } - return pkg + slices[1] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val } - panic("Incorrect type name: " + typeWithPkg) + if len(pkg) == 0 { + return name + } + short := g.addImportByPath(pkg) + if len(short) > 0 { + return fmt.Sprintf("%s.%s", short, name) + } + return name } } +func (g *deepCopyGenerator) typeName(inType reflect.Type) string { + if t, ok := g.replace[pkgPathNamePair{inType.PkgPath(), inType.Name()}]; ok { + return g.nameForType(t) + } + return g.nameForType(inType) +} + func (g *deepCopyGenerator) deepCopyFunctionName(inType reflect.Type) string { funcNameFormat := "deepCopy_%s_%s" inPkg := packageForName(inType) @@ -442,12 +558,8 @@ func (g *deepCopyGenerator) writeDeepCopyForType(b *buffer, inType reflect.Type, func (g *deepCopyGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) { b.addLine("func init() {\n", indent) - registerFormat := "err := %sScheme.AddGeneratedDeepCopyFuncs(\n" - if pkg == "api" { - b.addLine(fmt.Sprintf(registerFormat, ""), indent+1) - } else { - b.addLine(fmt.Sprintf(registerFormat, "api."), indent+1) - } + registerFormat := "err := %s.AddGeneratedDeepCopyFuncs(\n" + b.addLine(fmt.Sprintf(registerFormat, pkg), indent+1) } func (g *deepCopyGenerator) writeRegisterFooter(b *buffer, indent int) { From 487fe2d1162e83ee2d10c36f824d1ff3e778dbb3 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 29 Jul 2015 17:06:55 -0400 Subject: [PATCH 29/49] Update deep copies --- pkg/api/deep_copy_generated.go | 38 +++++++++++++++---------------- pkg/api/v1/deep_copy_generated.go | 36 ++++++++++++++--------------- 2 files changed, 37 insertions(+), 37 deletions(-) diff --git a/pkg/api/deep_copy_generated.go b/pkg/api/deep_copy_generated.go index 7e9c27b0581..c4e083c068e 100644 --- a/pkg/api/deep_copy_generated.go +++ b/pkg/api/deep_copy_generated.go @@ -18,14 +18,14 @@ package api // AUTO-GENERATED FUNCTIONS START HERE import ( - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" - "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" - "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" - "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util" - "speter.net/go/exp/math/dec/inf" - "time" + resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" + conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + fields "github.com/GoogleCloudPlatform/kubernetes/pkg/fields" + labels "github.com/GoogleCloudPlatform/kubernetes/pkg/labels" + runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + util "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + inf "speter.net/go/exp/math/dec/inf" + time "time" ) func deepCopy_api_AWSElasticBlockStoreVolumeSource(in AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, c *conversion.Cloner) error { @@ -587,7 +587,7 @@ func deepCopy_api_LimitRange(in LimitRange, out *LimitRange, c *conversion.Clone func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { out.Type = in.Type if in.Max != nil { - out.Max = make(map[ResourceName]resource.Quantity) + out.Max = make(ResourceList) for key, val := range in.Max { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -599,7 +599,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv out.Max = nil } if in.Min != nil { - out.Min = 
make(map[ResourceName]resource.Quantity) + out.Min = make(ResourceList) for key, val := range in.Min { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -611,7 +611,7 @@ func deepCopy_api_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conv out.Min = nil } if in.Default != nil { - out.Default = make(map[ResourceName]resource.Quantity) + out.Default = make(ResourceList) for key, val := range in.Default { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -857,7 +857,7 @@ func deepCopy_api_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) err func deepCopy_api_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1041,7 +1041,7 @@ func deepCopy_api_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, ou out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1143,7 +1143,7 @@ func deepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist func deepCopy_api_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1571,7 +1571,7 @@ func deepCopy_api_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1587,7 +1587,7 @@ func deepCopy_api_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1599,7 +1599,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota out.Hard = nil } if in.Used != nil { - out.Used = make(map[ResourceName]resource.Quantity) + out.Used = make(ResourceList) for key, val := range in.Used { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1615,7 +1615,7 @@ func deepCopy_api_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuota func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { if in.Limits != nil { - out.Limits = make(map[ResourceName]resource.Quantity) + out.Limits = make(ResourceList) for key, val := range in.Limits { newVal := new(resource.Quantity) if err 
:= deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1627,7 +1627,7 @@ func deepCopy_api_ResourceRequirements(in ResourceRequirements, out *ResourceReq out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[ResourceName]resource.Quantity) + out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { diff --git a/pkg/api/v1/deep_copy_generated.go b/pkg/api/v1/deep_copy_generated.go index b61b2cdce3c..91c9b8d3f68 100644 --- a/pkg/api/v1/deep_copy_generated.go +++ b/pkg/api/v1/deep_copy_generated.go @@ -18,13 +18,13 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE import ( - "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" - "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util" - "speter.net/go/exp/math/dec/inf" - "time" + api "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" + conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + util "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + inf "speter.net/go/exp/math/dec/inf" + time "time" ) func deepCopy_resource_Quantity(in resource.Quantity, out *resource.Quantity, c *conversion.Cloner) error { @@ -600,7 +600,7 @@ func deepCopy_v1_LimitRange(in LimitRange, out *LimitRange, c *conversion.Cloner func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conversion.Cloner) error { out.Type = in.Type if in.Max != nil { - out.Max = make(map[ResourceName]resource.Quantity) + out.Max = make(ResourceList) for key, val := range in.Max { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -612,7 +612,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve out.Max = nil } if in.Min != nil { - out.Min = make(map[ResourceName]resource.Quantity) + out.Min = make(ResourceList) for key, val := range in.Min { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -624,7 +624,7 @@ func deepCopy_v1_LimitRangeItem(in LimitRangeItem, out *LimitRangeItem, c *conve out.Min = nil } if in.Default != nil { - out.Default = make(map[ResourceName]resource.Quantity) + out.Default = make(ResourceList) for key, val := range in.Default { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -860,7 +860,7 @@ func deepCopy_v1_NodeSpec(in NodeSpec, out *NodeSpec, c *conversion.Cloner) erro func deepCopy_v1_NodeStatus(in NodeStatus, out *NodeStatus, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1044,7 +1044,7 @@ func deepCopy_v1_PersistentVolumeClaimStatus(in PersistentVolumeClaimStatus, out out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1146,7 
+1146,7 @@ func deepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste func deepCopy_v1_PersistentVolumeSpec(in PersistentVolumeSpec, out *PersistentVolumeSpec, c *conversion.Cloner) error { if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1580,7 +1580,7 @@ func deepCopy_v1_ResourceQuotaList(in ResourceQuotaList, out *ResourceQuotaList, func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1596,7 +1596,7 @@ func deepCopy_v1_ResourceQuotaSpec(in ResourceQuotaSpec, out *ResourceQuotaSpec, func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaStatus, c *conversion.Cloner) error { if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1608,7 +1608,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS out.Hard = nil } if in.Used != nil { - out.Used = make(map[ResourceName]resource.Quantity) + out.Used = make(ResourceList) for key, val := range in.Used { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1624,7 +1624,7 @@ func deepCopy_v1_ResourceQuotaStatus(in ResourceQuotaStatus, out *ResourceQuotaS func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequirements, c *conversion.Cloner) error { if in.Limits != nil { - out.Limits = make(map[ResourceName]resource.Quantity) + out.Limits = make(ResourceList) for key, val := range in.Limits { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { @@ -1636,7 +1636,7 @@ func deepCopy_v1_ResourceRequirements(in ResourceRequirements, out *ResourceRequ out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[ResourceName]resource.Quantity) + out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := new(resource.Quantity) if err := deepCopy_resource_Quantity(val, newVal, c); err != nil { From 1d41f5ac75011daf465a7f799401793bd6bede81 Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Wed, 29 Jul 2015 17:07:14 -0400 Subject: [PATCH 30/49] Update generated conversions --- pkg/api/v1/conversion_generated.go | 55 +++++++++++++++--------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/pkg/api/v1/conversion_generated.go b/pkg/api/v1/conversion_generated.go index d98300aba21..52cdbe805f6 100644 --- a/pkg/api/v1/conversion_generated.go +++ b/pkg/api/v1/conversion_generated.go @@ -16,15 +16,14 @@ limitations under the License. 
package v1 +// AUTO-GENERATED FUNCTIONS START HERE import ( - "reflect" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/api" - "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" - "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + api "github.com/GoogleCloudPlatform/kubernetes/pkg/api" + resource "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource" + conversion "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion" + reflect "reflect" ) -// AUTO-GENERATED FUNCTIONS START HERE func convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { if defaulting, found := s.DefaultingInterface(reflect.TypeOf(*in)); found { defaulting.(func(*api.AWSElasticBlockStoreVolumeSource))(in) @@ -692,7 +691,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out } out.Type = LimitType(in.Type) if in.Max != nil { - out.Max = make(map[ResourceName]resource.Quantity) + out.Max = make(ResourceList) for key, val := range in.Max { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -704,7 +703,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out out.Max = nil } if in.Min != nil { - out.Min = make(map[ResourceName]resource.Quantity) + out.Min = make(ResourceList) for key, val := range in.Min { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -716,7 +715,7 @@ func convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out out.Min = nil } if in.Default != nil { - out.Default = make(map[ResourceName]resource.Quantity) + out.Default = make(ResourceList) for key, val := range in.Default { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1006,7 +1005,7 @@ func convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *NodeStatus defaulting.(func(*api.NodeStatus))(in) } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1216,7 +1215,7 @@ func convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(i out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1330,7 +1329,7 @@ func convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.Persist defaulting.(func(*api.PersistentVolumeSpec))(in) } if in.Capacity != nil { - out.Capacity = make(map[ResourceName]resource.Quantity) + out.Capacity = make(ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1735,7 +1734,7 @@ func convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuota defaulting.(func(*api.ResourceQuotaSpec))(in) } if in.Hard != nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1754,7 +1753,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ defaulting.(func(*api.ResourceQuotaStatus))(in) } if in.Hard != 
nil { - out.Hard = make(map[ResourceName]resource.Quantity) + out.Hard = make(ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1766,7 +1765,7 @@ func convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQ out.Hard = nil } if in.Used != nil { - out.Used = make(map[ResourceName]resource.Quantity) + out.Used = make(ResourceList) for key, val := range in.Used { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1785,7 +1784,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc defaulting.(func(*api.ResourceRequirements))(in) } if in.Limits != nil { - out.Limits = make(map[ResourceName]resource.Quantity) + out.Limits = make(ResourceList) for key, val := range in.Limits { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -1797,7 +1796,7 @@ func convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.Resourc out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[ResourceName]resource.Quantity) + out.Requests = make(ResourceList) for key, val := range in.Requests { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -2942,7 +2941,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap } out.Type = api.LimitType(in.Type) if in.Max != nil { - out.Max = make(map[api.ResourceName]resource.Quantity) + out.Max = make(api.ResourceList) for key, val := range in.Max { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -2954,7 +2953,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap out.Max = nil } if in.Min != nil { - out.Min = make(map[api.ResourceName]resource.Quantity) + out.Min = make(api.ResourceList) for key, val := range in.Min { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -2966,7 +2965,7 @@ func convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *LimitRangeItem, out *ap out.Min = nil } if in.Default != nil { - out.Default = make(map[api.ResourceName]resource.Quantity) + out.Default = make(api.ResourceList) for key, val := range in.Default { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3256,7 +3255,7 @@ func convert_v1_NodeStatus_To_api_NodeStatus(in *NodeStatus, out *api.NodeStatus defaulting.(func(*NodeStatus))(in) } if in.Capacity != nil { - out.Capacity = make(map[api.ResourceName]resource.Quantity) + out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3466,7 +3465,7 @@ func convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(i out.AccessModes = nil } if in.Capacity != nil { - out.Capacity = make(map[api.ResourceName]resource.Quantity) + out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3580,7 +3579,7 @@ func convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *PersistentV defaulting.(func(*PersistentVolumeSpec))(in) } if in.Capacity != nil { - out.Capacity = make(map[api.ResourceName]resource.Quantity) + out.Capacity = make(api.ResourceList) for key, val := range in.Capacity { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -3985,7 +3984,7 @@ func 
convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *ResourceQuotaSpec defaulting.(func(*ResourceQuotaSpec))(in) } if in.Hard != nil { - out.Hard = make(map[api.ResourceName]resource.Quantity) + out.Hard = make(api.ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4004,7 +4003,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota defaulting.(func(*ResourceQuotaStatus))(in) } if in.Hard != nil { - out.Hard = make(map[api.ResourceName]resource.Quantity) + out.Hard = make(api.ResourceList) for key, val := range in.Hard { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4016,7 +4015,7 @@ func convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *ResourceQuota out.Hard = nil } if in.Used != nil { - out.Used = make(map[api.ResourceName]resource.Quantity) + out.Used = make(api.ResourceList) for key, val := range in.Used { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4035,7 +4034,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq defaulting.(func(*ResourceRequirements))(in) } if in.Limits != nil { - out.Limits = make(map[api.ResourceName]resource.Quantity) + out.Limits = make(api.ResourceList) for key, val := range in.Limits { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { @@ -4047,7 +4046,7 @@ func convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *ResourceReq out.Limits = nil } if in.Requests != nil { - out.Requests = make(map[api.ResourceName]resource.Quantity) + out.Requests = make(api.ResourceList) for key, val := range in.Requests { newVal := resource.Quantity{} if err := s.Convert(&val, &newVal, 0); err != nil { From 1ff8964c20a694eca03abc14181f9ccc3dc01dd9 Mon Sep 17 00:00:00 2001 From: Paul Morie Date: Wed, 29 Jul 2015 17:11:19 -0400 Subject: [PATCH 31/49] Accurately report ts used to make decisions in node-controller.go --- pkg/cloudprovider/nodecontroller/nodecontroller.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller.go b/pkg/cloudprovider/nodecontroller/nodecontroller.go index 95e072061f8..cc1667e20bc 100644 --- a/pkg/cloudprovider/nodecontroller/nodecontroller.go +++ b/pkg/cloudprovider/nodecontroller/nodecontroller.go @@ -247,7 +247,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap // - both saved and current statuses have Ready Conditions, different LastProbeTimes and different Ready Condition State - // Ready Condition changed it state since we last seen it, so we update both probeTimestamp and readyTransitionTimestamp. // TODO: things to consider: - // - if 'LastProbeTime' have gone back in time its probably and error, currently we ignore it, + // - if 'LastProbeTime' have gone back in time its probably an error, currently we ignore it, // - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check // if that's the case, but it does not seem necessary. savedCondition := nc.getCondition(&savedNodeStatus.status, api.NodeReady) @@ -374,18 +374,20 @@ func (nc *NodeController) monitorNodeStatus() error { continue } + decisionTimestamp := nc.now() + if readyCondition != nil { - // Check eviction timeout. 
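// Note on this hunk: nc.now() is captured once into decisionTimestamp and
// reused for both the deadline comparison and the log message, so the
// timestamp reported is exactly the one the eviction decision was made
// with; calling nc.now() a second time for the log line could print a
// slightly later time than was actually compared.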
+ // Check eviction timeout against decisionTimestamp if lastReadyCondition.Status == api.ConditionFalse && - nc.now().After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) { + decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) { if nc.podEvictor.AddNodeToEvict(node.Name) { - glog.Infof("Adding pods to evict: %v is later than %v + %v", nc.now(), nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout) + glog.Infof("Adding pods to evict: %v is later than %v + %v", decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout) } } if lastReadyCondition.Status == api.ConditionUnknown && - nc.now().After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout-gracePeriod)) { + decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout-gracePeriod)) { if nc.podEvictor.AddNodeToEvict(node.Name) { - glog.Infof("Adding pods to evict2: %v is later than %v + %v", nc.now(), nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod) + glog.Infof("Adding pods to evict2: %v is later than %v + %v", decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod) } } if lastReadyCondition.Status == api.ConditionTrue { From 53ee37959b4166bc30599f0338d93ed8074f7403 Mon Sep 17 00:00:00 2001 From: jayunit100 Date: Wed, 29 Jul 2015 16:16:58 -0400 Subject: [PATCH 32/49] Rename getPodControllers to getPodController so that the truncate/ignore of overlapping RCs is clear --- pkg/controller/replication/replication_controller.go | 12 ++++++------ .../replication/replication_controller_test.go | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pkg/controller/replication/replication_controller.go b/pkg/controller/replication/replication_controller.go index e05a4ce35db..6725dc71d07 100644 --- a/pkg/controller/replication/replication_controller.go +++ b/pkg/controller/replication/replication_controller.go @@ -191,9 +191,9 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) { rm.queue.ShutDown() } -// getPodControllers returns the controller managing the given pod. +// getPodController returns the controller managing the given pod. // TODO: Surface that we are ignoring multiple controllers for a single pod. -func (rm *ReplicationManager) getPodControllers(pod *api.Pod) *api.ReplicationController { +func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationController { controllers, err := rm.rcStore.GetPodControllers(pod) if err != nil { glog.V(4).Infof("No controllers found for pod %v, replication manager will avoid syncing", pod.Name) @@ -211,7 +211,7 @@ func (rm *ReplicationManager) getPodControllers(pod *api.Pod) *api.ReplicationCo // When a pod is created, enqueue the controller that manages it and update it's expectations. 
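// Sketch of the truncation behavior the rename above is meant to surface
// (simplified; assumes rcStore.GetPodControllers returns every RC whose
// selector matches the pod's labels):
//
//	controllers, err := rm.rcStore.GetPodControllers(pod)
//	if err != nil {
//		return nil // unmanaged pod, nothing to enqueue
//	}
//	return &controllers[0] // overlapping RCs past the first are ignored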
func (rm *ReplicationManager) addPod(obj interface{}) { pod := obj.(*api.Pod) - if rc := rm.getPodControllers(pod); rc != nil { + if rc := rm.getPodController(pod); rc != nil { rcKey, err := controller.KeyFunc(rc) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) @@ -232,7 +232,7 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) { } // TODO: Write a unittest for this case curPod := cur.(*api.Pod) - if rc := rm.getPodControllers(curPod); rc != nil { + if rc := rm.getPodController(curPod); rc != nil { rm.enqueueController(rc) } oldPod := old.(*api.Pod) @@ -240,7 +240,7 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) { if !reflect.DeepEqual(curPod.Labels, oldPod.Labels) { // If the old and new rc are the same, the first one that syncs // will set expectations preventing any damage from the second. - if oldRC := rm.getPodControllers(oldPod); oldRC != nil { + if oldRC := rm.getPodController(oldPod); oldRC != nil { rm.enqueueController(oldRC) } } @@ -267,7 +267,7 @@ func (rm *ReplicationManager) deletePod(obj interface{}) { return } } - if rc := rm.getPodControllers(pod); rc != nil { + if rc := rm.getPodController(pod); rc != nil { rcKey, err := controller.KeyFunc(rc) if err != nil { glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err) diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index 292c116515e..e63191d15ce 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -484,7 +484,7 @@ func TestPodControllerLookup(t *testing.T) { for _, r := range c.inRCs { manager.rcStore.Add(r) } - if rc := manager.getPodControllers(c.pod); rc != nil { + if rc := manager.getPodController(c.pod); rc != nil { if c.outRCName != rc.Name { t.Errorf("Got controller %+v expected %+v", rc.Name, c.outRCName) } From 5394aa979f195a7b480ca0f46d996f6fe05910e2 Mon Sep 17 00:00:00 2001 From: Paul Morie Date: Tue, 7 Jul 2015 12:40:55 -0400 Subject: [PATCH 33/49] Make emptyDir volumes work for non-root UIDs --- .../for-tests/mount-tester-user/Dockerfile | 16 ++ contrib/for-tests/mount-tester-user/Makefile | 9 + contrib/for-tests/mount-tester/Makefile | 2 +- contrib/for-tests/mount-tester/mt.go | 59 +++- pkg/securitycontext/util.go | 27 +- pkg/securitycontext/util_test.go | 85 ++++++ pkg/volume/empty_dir/chcon_runner.go | 27 ++ pkg/volume/empty_dir/chcon_runner_linux.go | 34 +++ .../empty_dir/chcon_runner_unsupported.go | 26 ++ pkg/volume/empty_dir/empty_dir.go | 136 +++++++-- pkg/volume/empty_dir/empty_dir_linux.go | 6 + pkg/volume/empty_dir/empty_dir_test.go | 266 ++++++++++++------ pkg/volume/empty_dir/empty_dir_unsupported.go | 8 +- test/e2e/empty_dir.go | 203 ++++++++++--- 14 files changed, 739 insertions(+), 165 deletions(-) create mode 100644 contrib/for-tests/mount-tester-user/Dockerfile create mode 100644 contrib/for-tests/mount-tester-user/Makefile create mode 100644 pkg/securitycontext/util_test.go create mode 100644 pkg/volume/empty_dir/chcon_runner.go create mode 100644 pkg/volume/empty_dir/chcon_runner_linux.go create mode 100644 pkg/volume/empty_dir/chcon_runner_unsupported.go diff --git a/contrib/for-tests/mount-tester-user/Dockerfile b/contrib/for-tests/mount-tester-user/Dockerfile new file mode 100644 index 00000000000..70be763cc90 --- /dev/null +++ b/contrib/for-tests/mount-tester-user/Dockerfile @@ -0,0 +1,16 @@ +# Copyright 2015 Google Inc. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM gcr.io/google-containers/mounttest:0.3 +USER 1001 diff --git a/contrib/for-tests/mount-tester-user/Makefile b/contrib/for-tests/mount-tester-user/Makefile new file mode 100644 index 00000000000..0cb05d763b5 --- /dev/null +++ b/contrib/for-tests/mount-tester-user/Makefile @@ -0,0 +1,9 @@ +all: push + +TAG = 0.1 + +image: + sudo docker build -t gcr.io/google_containers/mounttest-user:$(TAG) . + +push: image + gcloud docker push gcr.io/google_containers/mounttest-user:$(TAG) diff --git a/contrib/for-tests/mount-tester/Makefile b/contrib/for-tests/mount-tester/Makefile index 37d37859068..01e6f584a52 100644 --- a/contrib/for-tests/mount-tester/Makefile +++ b/contrib/for-tests/mount-tester/Makefile @@ -1,6 +1,6 @@ all: push -TAG = 0.2 +TAG = 0.3 mt: mt.go CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./mt.go diff --git a/contrib/for-tests/mount-tester/mt.go b/contrib/for-tests/mount-tester/mt.go index 0a39d7cc86d..3e6fd3b382b 100644 --- a/contrib/for-tests/mount-tester/mt.go +++ b/contrib/for-tests/mount-tester/mt.go @@ -25,17 +25,23 @@ import ( ) var ( - fsTypePath = "" - fileModePath = "" - readFileContentPath = "" - readWriteNewFilePath = "" + fsTypePath = "" + fileModePath = "" + filePermPath = "" + readFileContentPath = "" + newFilePath0644 = "" + newFilePath0666 = "" + newFilePath0777 = "" ) func init() { flag.StringVar(&fsTypePath, "fs_type", "", "Path to print the fs type for") - flag.StringVar(&fileModePath, "file_mode", "", "Path to print the filemode of") + flag.StringVar(&fileModePath, "file_mode", "", "Path to print the mode bits of") + flag.StringVar(&filePermPath, "file_perm", "", "Path to print the perms of") flag.StringVar(&readFileContentPath, "file_content", "", "Path to read the file content from") - flag.StringVar(&readWriteNewFilePath, "rw_new_file", "", "Path to write to and read from") + flag.StringVar(&newFilePath0644, "new_file_0644", "", "Path to write to and read from with perm 0644") + flag.StringVar(&newFilePath0666, "new_file_0666", "", "Path to write to and read from with perm 0666") + flag.StringVar(&newFilePath0777, "new_file_0777", "", "Path to write to and read from with perm 0777") } // This program performs some tests on the filesystem as dictated by the @@ -48,6 +54,9 @@ func main() { errs = []error{} ) + // Clear the umask so we can set any mode bits we want. 
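// Background for the umask reset below: mode bits passed to file-creation
// calls are masked by the process umask. With the common default umask of
// 022, for example,
//
//	ioutil.WriteFile(path, []byte("x"), 0666) // file ends up 0644
//
// Clearing the umask lets the --new_file_0644/0666/0777 flags create files
// with exactly the requested permissions.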
+ syscall.Umask(0000) + // NOTE: the ordering of execution of the various command line // flags is intentional and allows a single command to: // @@ -62,7 +71,17 @@ func main() { errs = append(errs, err) } - err = readWriteNewFile(readWriteNewFilePath) + err = readWriteNewFile(newFilePath0644, 0644) + if err != nil { + errs = append(errs, err) + } + + err = readWriteNewFile(newFilePath0666, 0666) + if err != nil { + errs = append(errs, err) + } + + err = readWriteNewFile(newFilePath0777, 0777) if err != nil { errs = append(errs, err) } @@ -72,6 +91,11 @@ func main() { errs = append(errs, err) } + err = filePerm(filePermPath) + if err != nil { + errs = append(errs, err) + } + err = readFileContent(readFileContentPath) if err != nil { errs = append(errs, err) @@ -94,7 +118,7 @@ func fsType(path string) error { buf := syscall.Statfs_t{} if err := syscall.Statfs(path, &buf); err != nil { - fmt.Printf("error from statfs(%q): %v", path, err) + fmt.Printf("error from statfs(%q): %v\n", path, err) return err } @@ -122,6 +146,21 @@ func fileMode(path string) error { return nil } +func filePerm(path string) error { + if path == "" { + return nil + } + + fileinfo, err := os.Lstat(path) + if err != nil { + fmt.Printf("error from Lstat(%q): %v\n", path, err) + return err + } + + fmt.Printf("perms of file %q: %v\n", path, fileinfo.Mode().Perm()) + return nil +} + func readFileContent(path string) error { if path == "" { return nil @@ -138,13 +177,13 @@ func readFileContent(path string) error { return nil } -func readWriteNewFile(path string) error { +func readWriteNewFile(path string, perm os.FileMode) error { if path == "" { return nil } content := "mount-tester new file\n" - err := ioutil.WriteFile(path, []byte(content), 0644) + err := ioutil.WriteFile(path, []byte(content), perm) if err != nil { fmt.Printf("error writing new file %q: %v\n", path, err) return err diff --git a/pkg/securitycontext/util.go b/pkg/securitycontext/util.go index 64bf7e53ecd..fcdf0ae467d 100644 --- a/pkg/securitycontext/util.go +++ b/pkg/securitycontext/util.go @@ -16,7 +16,12 @@ limitations under the License. package securitycontext -import "github.com/GoogleCloudPlatform/kubernetes/pkg/api" +import ( + "fmt" + "strings" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" +) // HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account // the possibility of nils @@ -41,3 +46,23 @@ func HasCapabilitiesRequest(container *api.Container) bool { } return len(container.SecurityContext.Capabilities.Add) > 0 || len(container.SecurityContext.Capabilities.Drop) > 0 } + +const expectedSELinuxContextFields = 4 + +// ParseSELinuxOptions parses a string containing a full SELinux context +// (user, role, type, and level) into an SELinuxOptions object. If the +// context is malformed, an error is returned. 
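// For example, the level may itself contain colons (MLS/MCS categories),
// which is why SplitN with a limit of expectedSELinuxContextFields is used
// rather than Split:
//
//	strings.SplitN("user_u:role_r:type_t:s0:c0", ":", 4)
//	// -> ["user_u", "role_r", "type_t", "s0:c0"]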
+func ParseSELinuxOptions(context string) (*api.SELinuxOptions, error) { + fields := strings.SplitN(context, ":", expectedSELinuxContextFields) + + if len(fields) != expectedSELinuxContextFields { + return nil, fmt.Errorf("expected %v fields in selinuxcontext; got %v (context: %v)", expectedSELinuxContextFields, len(fields), context) + } + + return &api.SELinuxOptions{ + User: fields[0], + Role: fields[1], + Type: fields[2], + Level: fields[3], + }, nil +} diff --git a/pkg/securitycontext/util_test.go b/pkg/securitycontext/util_test.go new file mode 100644 index 00000000000..978e1a6850f --- /dev/null +++ b/pkg/securitycontext/util_test.go @@ -0,0 +1,85 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package securitycontext + +import ( + "testing" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/api" +) + +func TestParseSELinuxOptions(t *testing.T) { + cases := []struct { + name string + input string + expected *api.SELinuxOptions + }{ + { + name: "simple", + input: "user_t:role_t:type_t:s0", + expected: &api.SELinuxOptions{ + User: "user_t", + Role: "role_t", + Type: "type_t", + Level: "s0", + }, + }, + { + name: "simple + categories", + input: "user_t:role_t:type_t:s0:c0", + expected: &api.SELinuxOptions{ + User: "user_t", + Role: "role_t", + Type: "type_t", + Level: "s0:c0", + }, + }, + { + name: "not enough fields", + input: "type_t:s0:c0", + }, + } + + for _, tc := range cases { + result, err := ParseSELinuxOptions(tc.input) + + if err != nil { + if tc.expected == nil { + continue + } else { + t.Errorf("%v: unexpected error: %v", tc.name, err) + } + } + + compareContexts(tc.name, tc.expected, result, t) + } +} + +func compareContexts(name string, ex, ac *api.SELinuxOptions, t *testing.T) { + if e, a := ex.User, ac.User; e != a { + t.Errorf("%v: expected user: %v, got: %v", name, e, a) + } + if e, a := ex.Role, ac.Role; e != a { + t.Errorf("%v: expected role: %v, got: %v", name, e, a) + } + if e, a := ex.Type, ac.Type; e != a { + t.Errorf("%v: expected type: %v, got: %v", name, e, a) + } + if e, a := ex.Level, ac.Level; e != a { + t.Errorf("%v: expected level: %v, got: %v", name, e, a) + } +} diff --git a/pkg/volume/empty_dir/chcon_runner.go b/pkg/volume/empty_dir/chcon_runner.go new file mode 100644 index 00000000000..e18aefce6b1 --- /dev/null +++ b/pkg/volume/empty_dir/chcon_runner.go @@ -0,0 +1,27 @@ +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package empty_dir + +// chconRunner knows how to chcon a directory. +type chconRunner interface { + SetContext(dir, context string) error +} + +// newChconRunner returns a new chconRunner. +func newChconRunner() chconRunner { + return &realChconRunner{} +} diff --git a/pkg/volume/empty_dir/chcon_runner_linux.go b/pkg/volume/empty_dir/chcon_runner_linux.go new file mode 100644 index 00000000000..42abba25015 --- /dev/null +++ b/pkg/volume/empty_dir/chcon_runner_linux.go @@ -0,0 +1,34 @@ +// +build linux + +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package empty_dir + +import ( + "github.com/docker/libcontainer/selinux" +) + +type realChconRunner struct{} + +func (_ *realChconRunner) SetContext(dir, context string) error { + // If SELinux is not enabled, return an empty string + if !selinux.SelinuxEnabled() { + return nil + } + + return selinux.Setfilecon(dir, context) +} diff --git a/pkg/volume/empty_dir/chcon_runner_unsupported.go b/pkg/volume/empty_dir/chcon_runner_unsupported.go new file mode 100644 index 00000000000..4b75ef9d305 --- /dev/null +++ b/pkg/volume/empty_dir/chcon_runner_unsupported.go @@ -0,0 +1,26 @@ +// +build !linux + +/* +Copyright 2014 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package empty_dir + +type realChconRunner struct{} + +func (_ *realChconRunner) SetContext(dir, context string) error { + // NOP + return nil +} diff --git a/pkg/volume/empty_dir/empty_dir.go b/pkg/volume/empty_dir/empty_dir.go index 6a189a62841..0c4cca2f48d 100644 --- a/pkg/volume/empty_dir/empty_dir.go +++ b/pkg/volume/empty_dir/empty_dir.go @@ -19,15 +19,24 @@ package empty_dir import ( "fmt" "os" + "path" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" + volumeutil "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/util" "github.com/golang/glog" ) +// TODO: in the near future, this will be changed to be more restrictive +// and the group will be set to allow containers to use emptyDir volumes +// from the group attribute. +// +// https://github.com/GoogleCloudPlatform/kubernetes/issues/2630 +const perm os.FileMode = 0777 + // This is the primary entrypoint for volume plugins. 
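// Usage sketch for the chconRunner defined above (the context string here
// is illustrative only):
//
//	runner := newChconRunner()
//	err := runner.SetContext(volPath, "system_u:object_r:svirt_sandbox_file_t:s0")
//
// On Linux this delegates to selinux.Setfilecon and returns nil when
// SELinux is disabled; on other platforms SetContext is always a no-op.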
func ProbeVolumePlugins() []volume.VolumePlugin { return []volume.VolumePlugin{ @@ -61,22 +70,23 @@ func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool { } func (plugin *emptyDirPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) { - return plugin.newBuilderInternal(spec, pod, mounter, &realMountDetector{mounter}, opts) + return plugin.newBuilderInternal(spec, pod, mounter, &realMountDetector{mounter}, opts, newChconRunner()) } -func (plugin *emptyDirPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Builder, error) { +func (plugin *emptyDirPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions, chconRunner chconRunner) (volume.Builder, error) { medium := api.StorageMediumDefault if spec.VolumeSource.EmptyDir != nil { // Support a non-specified source as EmptyDir. medium = spec.VolumeSource.EmptyDir.Medium } return &emptyDir{ - podUID: pod.UID, + pod: pod, volName: spec.Name, medium: medium, mounter: mounter, mountDetector: mountDetector, plugin: plugin, rootContext: opts.RootContext, + chconRunner: chconRunner, }, nil } @@ -87,7 +97,7 @@ func (plugin *emptyDirPlugin) NewCleaner(volName string, podUID types.UID, mount func (plugin *emptyDirPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Cleaner, error) { ed := &emptyDir{ - podUID: podUID, + pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}}, volName: volName, medium: api.StorageMediumDefault, // might be changed later mounter: mounter, @@ -117,13 +127,14 @@ const ( // EmptyDir volumes are temporary directories exposed to the pod. // These do not persist beyond the lifetime of a pod. type emptyDir struct { - podUID types.UID + pod *api.Pod volName string medium api.StorageMedium mounter mount.Interface mountDetector mountDetector plugin *emptyDirPlugin rootContext string + chconRunner chconRunner } // SetUp creates new directory. @@ -133,29 +144,58 @@ func (ed *emptyDir) SetUp() error { // SetUpAt creates new directory. func (ed *emptyDir) SetUpAt(dir string) error { + isMnt, err := ed.mounter.IsMountPoint(dir) + // Getting an os.IsNotExist err from is a contingency; the directory + // may not exist yet, in which case, setup should run. + if err != nil && !os.IsNotExist(err) { + return err + } + + // If the plugin readiness file is present for this volume, and the + // storage medium is the default, then the volume is ready. If the + // medium is memory, and a mountpoint is present, then the volume is + // ready. + if volumeutil.IsReady(ed.getMetaDir()) { + if ed.medium == api.StorageMediumMemory && isMnt { + return nil + } else if ed.medium == api.StorageMediumDefault { + return nil + } + } + + // Determine the effective SELinuxOptions to use for this volume. 
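// When the effective context is non-empty it is applied with chcon for
// disk-backed volumes and passed as a rootcontext mount option for
// memory-backed ones, i.e. the tmpfs path reduces to:
//
//	opts := []string{fmt.Sprintf("rootcontext=%q", selinuxContext)}
//	ed.mounter.Mount("tmpfs", dir, "tmpfs", opts)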
+ securityContext := "" + if selinuxEnabled() { + securityContext = ed.rootContext + } + switch ed.medium { case api.StorageMediumDefault: - return ed.setupDefault(dir) + err = ed.setupDir(dir, securityContext) case api.StorageMediumMemory: - return ed.setupTmpfs(dir) + err = ed.setupTmpfs(dir, securityContext) default: - return fmt.Errorf("unknown storage medium %q", ed.medium) + err = fmt.Errorf("unknown storage medium %q", ed.medium) } + + if err == nil { + volumeutil.SetReady(ed.getMetaDir()) + } + + return err } func (ed *emptyDir) IsReadOnly() bool { return false } -func (ed *emptyDir) setupDefault(dir string) error { - return os.MkdirAll(dir, 0750) -} - -func (ed *emptyDir) setupTmpfs(dir string) error { +// setupTmpfs creates a tmpfs mount at the specified directory with the +// specified SELinux context. +func (ed *emptyDir) setupTmpfs(dir string, selinuxContext string) error { if ed.mounter == nil { return fmt.Errorf("memory storage requested, but mounter is nil") } - if err := os.MkdirAll(dir, 0750); err != nil { + if err := ed.setupDir(dir, selinuxContext); err != nil { return err } // Make SetUp idempotent. @@ -170,28 +210,66 @@ func (ed *emptyDir) setupTmpfs(dir string) error { } // By default a tmpfs mount will receive a different SELinux context - // from that of the Kubelet root directory which is not readable from - // the SELinux context of a docker container. - // - // getTmpfsMountOptions gets the mount option to set the context of - // the tmpfs mount so that it can be read from the SELinux context of - // the container. - opts := ed.getTmpfsMountOptions() - glog.V(3).Infof("pod %v: mounting tmpfs for volume %v with opts %v", ed.podUID, ed.volName, opts) + // which is not readable from the SELinux context of a docker container. + var opts []string + if selinuxContext != "" { + opts = []string{fmt.Sprintf("rootcontext=\"%v\"", selinuxContext)} + } else { + opts = []string{} + } + + glog.V(3).Infof("pod %v: mounting tmpfs for volume %v with opts %v", ed.pod.UID, ed.volName, opts) return ed.mounter.Mount("tmpfs", dir, "tmpfs", opts) } -func (ed *emptyDir) getTmpfsMountOptions() []string { - if ed.rootContext == "" { - return []string{""} +// setupDir creates the directory with the specified SELinux context and +// the default permissions specified by the perm constant. +func (ed *emptyDir) setupDir(dir, selinuxContext string) error { + // Create the directory if it doesn't already exist. + if err := os.MkdirAll(dir, perm); err != nil { + return err } - return []string{fmt.Sprintf("rootcontext=\"%v\"", ed.rootContext)} + // stat the directory to read permission bits + fileinfo, err := os.Lstat(dir) + if err != nil { + return err + } + + if fileinfo.Mode().Perm() != perm.Perm() { + // If the permissions on the created directory are wrong, the + // kubelet is probably running with a umask set. In order to + // avoid clearing the umask for the entire process or locking + // the thread, clearing the umask, creating the dir, restoring + // the umask, and unlocking the thread, we do a chmod to set + // the specific bits we need. 
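// Concretely: with a kubelet umask of 022, os.MkdirAll(dir, 0777) leaves
// the directory at 0755; os.Chmod is not subject to the umask, so the
// chmod below restores the intended 0777 bits without changing the
// process-wide umask.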
+ err := os.Chmod(dir, perm) + if err != nil { + return err + } + + fileinfo, err = os.Lstat(dir) + if err != nil { + return err + } + + if fileinfo.Mode().Perm() != perm.Perm() { + glog.Errorf("Expected directory %q permissions to be: %s; got: %s", dir, perm.Perm(), fileinfo.Mode().Perm()) + } + } + + // Set the context on the directory, if appropriate + if selinuxContext != "" { + glog.V(3).Infof("Setting SELinux context for %v to %v", dir, selinuxContext) + return ed.chconRunner.SetContext(dir, selinuxContext) + } + + return nil } func (ed *emptyDir) GetPath() string { name := emptyDirPluginName - return ed.plugin.host.GetPodVolumeDir(ed.podUID, util.EscapeQualifiedNameForDisk(name), ed.volName) + return ed.plugin.host.GetPodVolumeDir(ed.pod.UID, util.EscapeQualifiedNameForDisk(name), ed.volName) } // TearDown simply discards everything in the directory. @@ -238,3 +316,7 @@ func (ed *emptyDir) teardownTmpfs(dir string) error { } return nil } + +func (ed *emptyDir) getMetaDir() string { + return path.Join(ed.plugin.host.GetPodPluginDir(ed.pod.UID, util.EscapeQualifiedNameForDisk(emptyDirPluginName)), ed.volName) +} diff --git a/pkg/volume/empty_dir/empty_dir_linux.go b/pkg/volume/empty_dir/empty_dir_linux.go index aaf2a71fc54..57b5e5fd211 100644 --- a/pkg/volume/empty_dir/empty_dir_linux.go +++ b/pkg/volume/empty_dir/empty_dir_linux.go @@ -23,6 +23,7 @@ import ( "syscall" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" + "github.com/docker/libcontainer/selinux" "github.com/golang/glog" ) @@ -51,3 +52,8 @@ func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, er } return mediumUnknown, isMnt, nil } + +// selinuxEnabled determines whether SELinux is enabled. +func selinuxEnabled() bool { + return selinux.SelinuxEnabled() +} diff --git a/pkg/volume/empty_dir/empty_dir_test.go b/pkg/volume/empty_dir/empty_dir_test.go index e61e61baffa..9c3aba54351 100644 --- a/pkg/volume/empty_dir/empty_dir_test.go +++ b/pkg/volume/empty_dir/empty_dir_test.go @@ -17,6 +17,7 @@ limitations under the License. package empty_dir import ( + "io/ioutil" "os" "path" "testing" @@ -25,13 +26,11 @@ import ( "github.com/GoogleCloudPlatform/kubernetes/pkg/types" "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" "github.com/GoogleCloudPlatform/kubernetes/pkg/volume" + "github.com/GoogleCloudPlatform/kubernetes/pkg/volume/util" ) -// The dir where volumes will be stored. -const basePath = "/tmp/fake" - // Construct an instance of a plugin, by name. 
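// The idempotency these tests exercise rests on the per-volume readiness
// marker. A sketch of the contract (the real SetUpAt above also checks the
// storage medium and mountpoint state before trusting the marker):
//
//	if volumeutil.IsReady(ed.getMetaDir()) {
//		return nil // a previous SetUp completed; skip the work
//	}
//	// ... perform setup ...
//	volumeutil.SetReady(ed.getMetaDir())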
-func makePluginUnderTest(t *testing.T, plugName string) volume.VolumePlugin { +func makePluginUnderTest(t *testing.T, plugName, basePath string) volume.VolumePlugin { plugMgr := volume.VolumePluginMgr{} plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(basePath, nil, nil)) @@ -43,7 +42,7 @@ func makePluginUnderTest(t *testing.T, plugName string) volume.VolumePlugin { } func TestCanSupport(t *testing.T) { - plug := makePluginUnderTest(t, "kubernetes.io/empty-dir") + plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", "/tmp/fake") if plug.Name() != "kubernetes.io/empty-dir" { t.Errorf("Wrong name: %s", plug.Name()) @@ -65,77 +64,132 @@ func (fake *fakeMountDetector) GetMountMedium(path string) (storageMedium, bool, return fake.medium, fake.isMount, nil } -func TestPlugin(t *testing.T) { - plug := makePluginUnderTest(t, "kubernetes.io/empty-dir") +type fakeChconRequest struct { + dir string + context string +} - spec := &api.Volume{ - Name: "vol1", - VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumDefault}}, - } - mounter := mount.FakeMounter{} - mountDetector := fakeMountDetector{} - pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} - builder, err := plug.(*emptyDirPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), pod, &mounter, &mountDetector, volume.VolumeOptions{""}) - if err != nil { - t.Errorf("Failed to make a new Builder: %v", err) - } - if builder == nil { - t.Errorf("Got a nil Builder") +type fakeChconRunner struct { + requests []fakeChconRequest +} + +func newFakeChconRunner() *fakeChconRunner { + return &fakeChconRunner{} +} + +func (f *fakeChconRunner) SetContext(dir, context string) error { + f.requests = append(f.requests, fakeChconRequest{dir, context}) + + return nil +} + +func TestPluginEmptyRootContext(t *testing.T) { + doTestPlugin(t, pluginTestConfig{ + medium: api.StorageMediumDefault, + rootContext: "", + expectedChcons: 0, + expectedSetupMounts: 0, + expectedTeardownMounts: 0}) +} + +func TestPluginRootContextSet(t *testing.T) { + if !selinuxEnabled() { + return } - volPath := builder.GetPath() - if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") { - t.Errorf("Got unexpected path: %s", volPath) - } - - if err := builder.SetUp(); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(volPath); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", volPath) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - if len(mounter.Log) != 0 { - t.Errorf("Expected 0 mounter calls, got %#v", mounter.Log) - } - mounter.ResetLog() - - cleaner, err := plug.(*emptyDirPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mounter, &fakeMountDetector{}) - if err != nil { - t.Errorf("Failed to make a new Cleaner: %v", err) - } - if cleaner == nil { - t.Errorf("Got a nil Cleaner") - } - - if err := cleaner.TearDown(); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(volPath); err == nil { - t.Errorf("TearDown() failed, volume path still exists: %s", volPath) - } else if !os.IsNotExist(err) { - t.Errorf("SetUp() failed: %v", err) - } - if len(mounter.Log) != 0 { - t.Errorf("Expected 0 mounter calls, got %#v", mounter.Log) - } - mounter.ResetLog() + doTestPlugin(t, pluginTestConfig{ + medium: api.StorageMediumDefault, + rootContext: "user:role:type:range", + expectedSELinuxContext: "user:role:type:range", + expectedChcons: 1, + 
expectedSetupMounts: 0, + expectedTeardownMounts: 0}) } func TestPluginTmpfs(t *testing.T) { - plug := makePluginUnderTest(t, "kubernetes.io/empty-dir") - - spec := &api.Volume{ - Name: "vol1", - VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}, + if !selinuxEnabled() { + return } - mounter := mount.FakeMounter{} - mountDetector := fakeMountDetector{} - pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} - builder, err := plug.(*emptyDirPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), pod, &mounter, &mountDetector, volume.VolumeOptions{""}) + + doTestPlugin(t, pluginTestConfig{ + medium: api.StorageMediumMemory, + rootContext: "user:role:type:range", + expectedSELinuxContext: "user:role:type:range", + expectedChcons: 1, + expectedSetupMounts: 1, + shouldBeMountedBeforeTeardown: true, + expectedTeardownMounts: 1}) +} + +type pluginTestConfig struct { + medium api.StorageMedium + rootContext string + SELinuxOptions *api.SELinuxOptions + idempotent bool + expectedSELinuxContext string + expectedChcons int + expectedSetupMounts int + shouldBeMountedBeforeTeardown bool + expectedTeardownMounts int +} + +// doTestPlugin sets up a volume and tears it back down. +func doTestPlugin(t *testing.T, config pluginTestConfig) { + basePath, err := ioutil.TempDir("/tmp", "emptydir_volume_test") + if err != nil { + t.Fatalf("can't make a temp rootdir") + } + + var ( + volumePath = path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/test-volume") + metadataDir = path.Join(basePath, "pods/poduid/plugins/kubernetes.io~empty-dir/test-volume") + + plug = makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath) + volumeName = "test-volume" + spec = &api.Volume{ + Name: volumeName, + VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}}, + } + + mounter = mount.FakeMounter{} + mountDetector = fakeMountDetector{} + pod = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} + fakeChconRnr = &fakeChconRunner{} + ) + + // Set up the SELinux options on the pod + if config.SELinuxOptions != nil { + pod.Spec = api.PodSpec{ + Containers: []api.Container{ + { + SecurityContext: &api.SecurityContext{ + SELinuxOptions: config.SELinuxOptions, + }, + VolumeMounts: []api.VolumeMount{ + { + Name: volumeName, + }, + }, + }, + }, + } + } + + if config.idempotent { + mounter.MountPoints = []mount.MountPoint{ + { + Path: volumePath, + }, + } + util.SetReady(metadataDir) + } + + builder, err := plug.(*emptyDirPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), + pod, + &mounter, + &mountDetector, + volume.VolumeOptions{config.rootContext}, + fakeChconRnr) if err != nil { t.Errorf("Failed to make a new Builder: %v", err) } @@ -144,30 +198,62 @@ func TestPluginTmpfs(t *testing.T) { } volPath := builder.GetPath() - if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") { + if volPath != volumePath { t.Errorf("Got unexpected path: %s", volPath) } if err := builder.SetUp(); err != nil { t.Errorf("Expected success, got: %v", err) } - if _, err := os.Stat(volPath); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", volPath) - } else { - t.Errorf("SetUp() failed: %v", err) + + // Stat the directory and check the permission bits + fileinfo, err := os.Stat(volPath) + if !config.idempotent { + if err != nil { + if os.IsNotExist(err) { + t.Errorf("SetUp() failed, volume path not created: %s", volPath) + } else { + 
t.Errorf("SetUp() failed: %v", err) + } + } + if e, a := perm, fileinfo.Mode().Perm(); e != a { + t.Errorf("Unexpected file mode for %v: expected: %v, got: %v", volPath, e, a) + } + } else if err == nil { + // If this test is for idempotency and we were able + // to stat the volume path, it's an error. + t.Errorf("Volume directory was created unexpectedly") + } + + // Check the number of chcons during setup + if e, a := config.expectedChcons, len(fakeChconRnr.requests); e != a { + t.Errorf("Expected %v chcon calls, got %v", e, a) + } + if config.expectedChcons == 1 { + if e, a := config.expectedSELinuxContext, fakeChconRnr.requests[0].context; e != a { + t.Errorf("Unexpected chcon context argument; expected: %v, got: %v", e, a) + } + if e, a := volPath, fakeChconRnr.requests[0].dir; e != a { + t.Errorf("Unexpected chcon path argument: expected: %v, got: %v", e, a) } } - if len(mounter.Log) != 1 { - t.Errorf("Expected 1 mounter call, got %#v", mounter.Log) - } else { - if mounter.Log[0].Action != mount.FakeActionMount || mounter.Log[0].FSType != "tmpfs" { - t.Errorf("Unexpected mounter action: %#v", mounter.Log[0]) - } + + // Check the number of mounts performed during setup + if e, a := config.expectedSetupMounts, len(mounter.Log); e != a { + t.Errorf("Expected %v mounter calls during setup, got %v", e, a) + } else if config.expectedSetupMounts == 1 && + (mounter.Log[0].Action != mount.FakeActionMount || mounter.Log[0].FSType != "tmpfs") { + t.Errorf("Unexpected mounter action during setup: %#v", mounter.Log[0]) } mounter.ResetLog() - cleaner, err := plug.(*emptyDirPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mounter, &fakeMountDetector{mediumMemory, true}) + // Make a cleaner for the volume + teardownMedium := mediumUnknown + if config.medium == api.StorageMediumMemory { + teardownMedium = mediumMemory + } + cleanerMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown} + cleaner, err := plug.(*emptyDirPlugin).newCleanerInternal(volumeName, types.UID("poduid"), &mounter, cleanerMountDetector) if err != nil { t.Errorf("Failed to make a new Cleaner: %v", err) } @@ -175,6 +261,7 @@ func TestPluginTmpfs(t *testing.T) { t.Errorf("Got a nil Cleaner") } + // Tear down the volume if err := cleaner.TearDown(); err != nil { t.Errorf("Expected success, got: %v", err) } @@ -183,18 +270,19 @@ func TestPluginTmpfs(t *testing.T) { } else if !os.IsNotExist(err) { t.Errorf("SetUp() failed: %v", err) } - if len(mounter.Log) != 1 { - t.Errorf("Expected 1 mounter call, got %d (%v)", len(mounter.Log), mounter.Log) - } else { - if mounter.Log[0].Action != mount.FakeActionUnmount { - t.Errorf("Unexpected mounter action: %#v", mounter.Log[0]) - } + + // Check the number of mounter calls during tardown + if e, a := config.expectedTeardownMounts, len(mounter.Log); e != a { + t.Errorf("Expected %v mounter calls during teardown, got %v", e, a) + } else if config.expectedTeardownMounts == 1 && mounter.Log[0].Action != mount.FakeActionUnmount { + t.Errorf("Unexpected mounter action during teardown: %#v", mounter.Log[0]) } mounter.ResetLog() } func TestPluginBackCompat(t *testing.T) { - plug := makePluginUnderTest(t, "kubernetes.io/empty-dir") + basePath := "/tmp/fake" + plug := makePluginUnderTest(t, "kubernetes.io/empty-dir", basePath) spec := &api.Volume{ Name: "vol1", diff --git a/pkg/volume/empty_dir/empty_dir_unsupported.go b/pkg/volume/empty_dir/empty_dir_unsupported.go index 7589fe7d332..845fdea3c1a 100644 --- 
a/pkg/volume/empty_dir/empty_dir_unsupported.go +++ b/pkg/volume/empty_dir/empty_dir_unsupported.go @@ -18,7 +18,9 @@ limitations under the License. package empty_dir -import "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" +import ( + "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount" +) // realMountDetector pretends to implement mediumer. type realMountDetector struct { @@ -28,3 +30,7 @@ type realMountDetector struct { func (m *realMountDetector) GetMountMedium(path string) (storageMedium, bool, error) { return mediumUnknown, false, nil } + +func selinuxEnabled() bool { + return false +} diff --git a/test/e2e/empty_dir.go b/test/e2e/empty_dir.go index 473789ad5da..2c3fdaf8f7a 100644 --- a/test/e2e/empty_dir.go +++ b/test/e2e/empty_dir.go @@ -27,51 +27,182 @@ import ( . "github.com/onsi/ginkgo" ) +const ( + testImageRootUid = "gcr.io/google_containers/mounttest:0.3" + testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.1" +) + var _ = Describe("EmptyDir volumes", func() { f := NewFramework("emptydir") - It("should have the correct mode", func() { - volumePath := "/test-volume" - source := &api.EmptyDirVolumeSource{ - Medium: api.StorageMediumMemory, - } - pod := testPodWithVolume(volumePath, source) - - pod.Spec.Containers[0].Args = []string{ - fmt.Sprintf("--fs_type=%v", volumePath), - fmt.Sprintf("--file_mode=%v", volumePath), - } - f.TestContainerOutput("emptydir r/w on tmpfs", pod, 0, []string{ - "mount type of \"/test-volume\": tmpfs", - "mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir - }) + It("volume on tmpfs should have the correct mode", func() { + doTestVolumeMode(f, testImageRootUid, api.StorageMediumMemory) }) - It("should support r/w", func() { - volumePath := "/test-volume" - filePath := path.Join(volumePath, "test-file") - source := &api.EmptyDirVolumeSource{ - Medium: api.StorageMediumMemory, - } - pod := testPodWithVolume(volumePath, source) + It("should support (root,0644,tmpfs)", func() { + doTest0644(f, testImageRootUid, api.StorageMediumMemory) + }) - pod.Spec.Containers[0].Args = []string{ - fmt.Sprintf("--fs_type=%v", volumePath), - fmt.Sprintf("--rw_new_file=%v", filePath), - fmt.Sprintf("--file_mode=%v", filePath), - } - f.TestContainerOutput("emptydir r/w on tmpfs", pod, 0, []string{ - "mount type of \"/test-volume\": tmpfs", - "mode of file \"/test-volume/test-file\": -rw-r--r--", - "content of file \"/test-volume/test-file\": mount-tester new file", - }) + It("should support (root,0666,tmpfs)", func() { + doTest0666(f, testImageRootUid, api.StorageMediumMemory) + }) + + It("should support (root,0777,tmpfs)", func() { + doTest0777(f, testImageRootUid, api.StorageMediumMemory) + }) + + It("should support (non-root,0644,tmpfs)", func() { + doTest0644(f, testImageNonRootUid, api.StorageMediumMemory) + }) + + It("should support (non-root,0666,tmpfs)", func() { + doTest0666(f, testImageNonRootUid, api.StorageMediumMemory) + }) + + It("should support (non-root,0777,tmpfs)", func() { + doTest0777(f, testImageNonRootUid, api.StorageMediumMemory) + }) + + It("volume on default medium should have the correct mode", func() { + doTestVolumeMode(f, testImageRootUid, api.StorageMediumDefault) + }) + + It("should support (root,0644,default)", func() { + doTest0644(f, testImageRootUid, api.StorageMediumDefault) + }) + + It("should support (root,0666,default)", func() { + doTest0666(f, testImageRootUid, api.StorageMediumDefault) + }) + + It("should support (root,0777,default)", func() { 
+ doTest0777(f, testImageRootUid, api.StorageMediumDefault) + }) + + It("should support (non-root,0644,default)", func() { + doTest0644(f, testImageNonRootUid, api.StorageMediumDefault) + }) + + It("should support (non-root,0666,default)", func() { + doTest0666(f, testImageNonRootUid, api.StorageMediumDefault) + }) + + It("should support (non-root,0777,default)", func() { + doTest0777(f, testImageNonRootUid, api.StorageMediumDefault) }) }) -const containerName = "test-container" -const volumeName = "test-volume" +const ( + containerName = "test-container" + volumeName = "test-volume" +) -func testPodWithVolume(path string, source *api.EmptyDirVolumeSource) *api.Pod { +func doTestVolumeMode(f *Framework, image string, medium api.StorageMedium) { + var ( + volumePath = "/test-volume" + source = &api.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(testImageRootUid, volumePath, source) + ) + + pod.Spec.Containers[0].Args = []string{ + fmt.Sprintf("--fs_type=%v", volumePath), + fmt.Sprintf("--file_perm=%v", volumePath), + } + + msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium)) + out := []string{ + "perms of file \"/test-volume\": -rwxrwxrwx", + } + if medium == api.StorageMediumMemory { + out = append(out, "mount type of \"/test-volume\": tmpfs") + } + f.TestContainerOutput(msg, pod, 0, out) +} + +func doTest0644(f *Framework, image string, medium api.StorageMedium) { + var ( + volumePath = "/test-volume" + filePath = path.Join(volumePath, "test-file") + source = &api.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) + ) + + pod.Spec.Containers[0].Args = []string{ + fmt.Sprintf("--fs_type=%v", volumePath), + fmt.Sprintf("--new_file_0644=%v", filePath), + fmt.Sprintf("--file_perm=%v", filePath), + } + + msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium)) + out := []string{ + "perms of file \"/test-volume/test-file\": -rw-r--r--", + "content of file \"/test-volume/test-file\": mount-tester new file", + } + if medium == api.StorageMediumMemory { + out = append(out, "mount type of \"/test-volume\": tmpfs") + } + f.TestContainerOutput(msg, pod, 0, out) +} + +func doTest0666(f *Framework, image string, medium api.StorageMedium) { + var ( + volumePath = "/test-volume" + filePath = path.Join(volumePath, "test-file") + source = &api.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) + ) + + pod.Spec.Containers[0].Args = []string{ + fmt.Sprintf("--fs_type=%v", volumePath), + fmt.Sprintf("--new_file_0666=%v", filePath), + fmt.Sprintf("--file_perm=%v", filePath), + } + + msg := fmt.Sprintf("emptydir 0666 on %v", formatMedium(medium)) + out := []string{ + "perms of file \"/test-volume/test-file\": -rw-rw-rw-", + "content of file \"/test-volume/test-file\": mount-tester new file", + } + if medium == api.StorageMediumMemory { + out = append(out, "mount type of \"/test-volume\": tmpfs") + } + f.TestContainerOutput(msg, pod, 0, out) +} + +func doTest0777(f *Framework, image string, medium api.StorageMedium) { + var ( + volumePath = "/test-volume" + filePath = path.Join(volumePath, "test-file") + source = &api.EmptyDirVolumeSource{Medium: medium} + pod = testPodWithVolume(image, volumePath, source) + ) + + pod.Spec.Containers[0].Args = []string{ + fmt.Sprintf("--fs_type=%v", volumePath), + fmt.Sprintf("--new_file_0777=%v", filePath), + fmt.Sprintf("--file_perm=%v", filePath), + } + + msg := fmt.Sprintf("emptydir 0777 on %v", formatMedium(medium)) + out := []string{ + "perms of file 
\"/test-volume/test-file\": -rwxrwxrwx", + "content of file \"/test-volume/test-file\": mount-tester new file", + } + if medium == api.StorageMediumMemory { + out = append(out, "mount type of \"/test-volume\": tmpfs") + } + f.TestContainerOutput(msg, pod, 0, out) +} + +func formatMedium(medium api.StorageMedium) string { + if medium == api.StorageMediumMemory { + return "tmpfs" + } + + return "node default medium" +} + +func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *api.Pod { podName := "pod-" + string(util.NewUUID()) return &api.Pod{ @@ -86,7 +217,7 @@ func testPodWithVolume(path string, source *api.EmptyDirVolumeSource) *api.Pod { Containers: []api.Container{ { Name: containerName, - Image: "gcr.io/google_containers/mounttest:0.2", + Image: image, VolumeMounts: []api.VolumeMount{ { Name: volumeName, From 9bec48298b9a00711ec85e7b8563a607e9b1c781 Mon Sep 17 00:00:00 2001 From: Brendan Burns Date: Fri, 24 Jul 2015 16:44:04 -0700 Subject: [PATCH 34/49] Add an initial (simple) implementation of a submit queue. --- contrib/submit-queue/github/github.go | 283 +++++++++++++++ contrib/submit-queue/github/github_test.go | 390 +++++++++++++++++++++ contrib/submit-queue/jenkins/jenkins.go | 91 +++++ contrib/submit-queue/submit-queue.go | 162 +++++++++ contrib/submit-queue/whitelist.txt | 41 +++ 5 files changed, 967 insertions(+) create mode 100644 contrib/submit-queue/github/github.go create mode 100644 contrib/submit-queue/github/github_test.go create mode 100644 contrib/submit-queue/jenkins/jenkins.go create mode 100644 contrib/submit-queue/submit-queue.go create mode 100644 contrib/submit-queue/whitelist.txt diff --git a/contrib/submit-queue/github/github.go b/contrib/submit-queue/github/github.go new file mode 100644 index 00000000000..63e598d3f5c --- /dev/null +++ b/contrib/submit-queue/github/github.go @@ -0,0 +1,283 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package github + +import ( + "fmt" + "time" + + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" + + "github.com/golang/glog" + "github.com/google/go-github/github" + "golang.org/x/oauth2" +) + +func MakeClient(token string) *github.Client { + if len(token) > 0 { + ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}) + tc := oauth2.NewClient(oauth2.NoContext, ts) + return github.NewClient(tc) + } + return github.NewClient(nil) +} + +func hasLabel(labels []github.Label, name string) bool { + for i := range labels { + label := &labels[i] + if label.Name != nil && *label.Name == name { + return true + } + } + return false +} + +func hasLabels(labels []github.Label, names []string) bool { + for i := range names { + if !hasLabel(labels, names[i]) { + return false + } + } + return true +} + +func fetchAllPRs(client *github.Client, user, project string) ([]github.PullRequest, error) { + page := 1 + var result []github.PullRequest + for { + glog.V(4).Infof("Fetching page %d", page) + listOpts := &github.PullRequestListOptions{ + Sort: "desc", + ListOptions: github.ListOptions{PerPage: 100, Page: page}, + } + prs, response, err := client.PullRequests.List(user, project, listOpts) + if err != nil { + return nil, err + } + result = append(result, prs...) + if response.LastPage == 0 || response.LastPage == page { + break + } + page++ + } + return result, nil +} + +type PRFunction func(*github.Client, *github.PullRequest, *github.Issue) error + +type FilterConfig struct { + MinPRNumber int + UserWhitelist []string + WhitelistOverride string + RequiredStatusContexts []string +} + +// For each PR in the project that matches: +// * pr.Number > minPRNumber +// * is mergeable +// * has labels "cla: yes", "lgtm" +// * combinedStatus = 'success' (e.g. all hooks have finished success in github) +// Run the specified function +func ForEachCandidatePRDo(client *github.Client, user, project string, fn PRFunction, once bool, config *FilterConfig) error { + // Get all PRs + prs, err := fetchAllPRs(client, user, project) + if err != nil { + return err + } + + userSet := util.StringSet{} + userSet.Insert(config.UserWhitelist...) + + for ix := range prs { + if prs[ix].User == nil || prs[ix].User.Login == nil { + glog.V(2).Infof("Skipping PR %d with no user info %v.", *prs[ix].Number, *prs[ix].User) + continue + } + if *prs[ix].Number < config.MinPRNumber { + glog.V(6).Infof("Dropping %d < %d", *prs[ix].Number, config.MinPRNumber) + continue + } + pr, _, err := client.PullRequests.Get(user, project, *prs[ix].Number) + if err != nil { + glog.Errorf("Error getting pull request: %v", err) + continue + } + glog.V(2).Infof("----==== %d ====----", *pr.Number) + + // Labels are actually stored in the Issues API, not the Pull Request API + issue, _, err := client.Issues.Get(user, project, *pr.Number) + if err != nil { + glog.Errorf("Failed to get issue for PR: %v", err) + continue + } + glog.V(8).Infof("%v", issue.Labels) + if !hasLabels(issue.Labels, []string{"lgtm", "cla: yes"}) { + continue + } + if !hasLabel(issue.Labels, config.WhitelistOverride) && !userSet.Has(*prs[ix].User.Login) { + glog.V(4).Infof("Dropping %d since %s isn't in whitelist and %s isn't present", *prs[ix].Number, *prs[ix].User.Login, config.WhitelistOverride) + continue + } + + // This is annoying, github appears to only temporarily cache mergeability, if it is nil, wait + // for an async refresh and retry. 
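// A bounded poll would make this retry more robust (sketch only; the code
// below does a single fixed sleep and one re-fetch):
//
//	for i := 0; i < 5 && err == nil && pr.Mergeable == nil; i++ {
//		time.Sleep(10 * time.Second)
//		pr, _, err = client.PullRequests.Get(user, project, *prs[ix].Number)
//	}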
+ if pr.Mergeable == nil { + glog.Infof("Waiting for mergeability on %s %d", *pr.Title, *pr.Number) + // TODO: determine what a good empirical setting for this is. + time.Sleep(10 * time.Second) + pr, _, err = client.PullRequests.Get(user, project, *prs[ix].Number) + } + if pr.Mergeable == nil { + glog.Errorf("No mergeability information for %s %d, Skipping.", *pr.Title, *pr.Number) + continue + } + if !*pr.Mergeable { + continue + } + + // Validate the status information for this PR + ok, err := ValidateStatus(client, user, project, *pr.Number, config.RequiredStatusContexts, false) + if err != nil { + glog.Errorf("Error validating PR status: %v", err) + continue + } + if !ok { + continue + } + if err := fn(client, pr, issue); err != nil { + glog.Errorf("Failed to run user function: %v", err) + continue + } + if once { + break + } + } + return nil +} + +func getCommitStatus(client *github.Client, user, project string, prNumber int) ([]*github.CombinedStatus, error) { + commits, _, err := client.PullRequests.ListCommits(user, project, prNumber, &github.ListOptions{}) + if err != nil { + return nil, err + } + commitStatus := make([]*github.CombinedStatus, len(commits)) + for ix := range commits { + commit := &commits[ix] + statusList, _, err := client.Repositories.GetCombinedStatus(user, project, *commit.SHA, &github.ListOptions{}) + if err != nil { + return nil, err + } + commitStatus[ix] = statusList + } + return commitStatus, nil +} + +// Gets the current status of a PR by introspecting the status of the commits in the PR. +// The rules are: +// * If any member of the 'requiredContexts' list is missing, it is 'incomplete' +// * If any commit is 'pending', the PR is 'pending' +// * If any commit is 'error', the PR is in 'error' +// * If any commit is 'failure', the PR is 'failure' +// * Otherwise the PR is 'success' +func GetStatus(client *github.Client, user, project string, prNumber int, requiredContexts []string) (string, error) { + statusList, err := getCommitStatus(client, user, project, prNumber) + if err != nil { + return "", err + } + return computeStatus(statusList, requiredContexts), nil +} + +func computeStatus(statusList []*github.CombinedStatus, requiredContexts []string) string { + states := util.StringSet{} + providers := util.StringSet{} + for ix := range statusList { + status := statusList[ix] + glog.V(8).Infof("Checking commit: %s", *status.SHA) + glog.V(8).Infof("Checking commit: %v", status) + states.Insert(*status.State) + + for _, subStatus := range status.Statuses { + glog.V(8).Infof("Found status from: %v", subStatus) + providers.Insert(*subStatus.Context) + } + } + for _, provider := range requiredContexts { + if !providers.Has(provider) { + glog.V(8).Infof("Failed to find %s in %v", provider, providers) + return "incomplete" + } + } + + switch { + case states.Has("pending"): + return "pending" + case states.Has("error"): + return "error" + case states.Has("failure"): + return "failure" + default: + return "success" + } +} + +// Make sure that the combined status for all commits in a PR is 'success' +// if 'waitForPending' is true, this function will wait until the PR is no longer pending (all checks have run) +func ValidateStatus(client *github.Client, user, project string, prNumber int, requiredContexts []string, waitOnPending bool) (bool, error) { + pending := true + for pending { + status, err := GetStatus(client, user, project, prNumber, requiredContexts) + if err != nil { + return false, err + } + switch status { + case "error", "failure": + return false, nil 
+ case "pending": + if !waitOnPending { + return false, nil + } + pending = true + glog.V(4).Info("PR is pending, waiting for 30 seconds") + time.Sleep(30 * time.Second) + case "success": + return true, nil + case "incomplete": + return false, nil + default: + return false, fmt.Errorf("unknown status: %s", status) + } + } + return true, nil +} + +// Wait for a PR to move into Pending. This is useful because the request to test a PR again +// is asynchronous with the PR actually moving into a pending state +// TODO: add a timeout +func WaitForPending(client *github.Client, user, project string, prNumber int) error { + for { + status, err := GetStatus(client, user, project, prNumber, []string{}) + if err != nil { + return err + } + if status == "pending" { + return nil + } + glog.V(4).Info("PR is not pending, waiting for 30 seconds") + time.Sleep(30 * time.Second) + } + return nil +} diff --git a/contrib/submit-queue/github/github_test.go b/contrib/submit-queue/github/github_test.go new file mode 100644 index 00000000000..83449c9803a --- /dev/null +++ b/contrib/submit-queue/github/github_test.go @@ -0,0 +1,390 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package github + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + + "github.com/google/go-github/github" +) + +func stringPtr(val string) *string { return &val } + +func TestHasLabel(t *testing.T) { + tests := []struct { + labels []github.Label + label string + hasLabel bool + }{ + { + labels: []github.Label{ + {Name: stringPtr("foo")}, + }, + label: "foo", + hasLabel: true, + }, + { + labels: []github.Label{ + {Name: stringPtr("bar")}, + }, + label: "foo", + hasLabel: false, + }, + { + labels: []github.Label{ + {Name: stringPtr("bar")}, + {Name: stringPtr("foo")}, + }, + label: "foo", + hasLabel: true, + }, + { + labels: []github.Label{ + {Name: stringPtr("bar")}, + {Name: stringPtr("baz")}, + }, + label: "foo", + hasLabel: false, + }, + } + + for _, test := range tests { + if test.hasLabel != hasLabel(test.labels, test.label) { + t.Errorf("Unexpected output: %v", test) + } + } +} + +func TestHasLabels(t *testing.T) { + tests := []struct { + labels []github.Label + seekLabels []string + hasLabel bool + }{ + { + labels: []github.Label{ + {Name: stringPtr("foo")}, + }, + seekLabels: []string{"foo"}, + hasLabel: true, + }, + { + labels: []github.Label{ + {Name: stringPtr("bar")}, + }, + seekLabels: []string{"foo"}, + hasLabel: false, + }, + { + labels: []github.Label{ + {Name: stringPtr("bar")}, + {Name: stringPtr("foo")}, + }, + seekLabels: []string{"foo"}, + hasLabel: true, + }, + { + labels: []github.Label{ + {Name: stringPtr("bar")}, + {Name: stringPtr("baz")}, + }, + seekLabels: []string{"foo"}, + hasLabel: false, + }, + { + labels: []github.Label{ + {Name: stringPtr("foo")}, + }, + seekLabels: []string{"foo", "bar"}, + hasLabel: false, + }, + } + + for _, test := range tests { + if test.hasLabel != hasLabels(test.labels, 
test.seekLabels) {
+			t.Errorf("Unexpected output: %v", test)
+		}
+	}
+}
+
+func initTest() (*github.Client, *httptest.Server, *http.ServeMux) {
+	// test server
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+
+	// github client configured to use test server
+	client := github.NewClient(nil)
+	url, _ := url.Parse(server.URL)
+	client.BaseURL = url
+	client.UploadURL = url
+
+	return client, server, mux
+}
+
+func TestFetchAllPRs(t *testing.T) {
+	tests := []struct {
+		PullRequests [][]github.PullRequest
+		Pages        []int
+	}{
+		{
+			PullRequests: [][]github.PullRequest{
+				{
+					{},
+				},
+			},
+			Pages: []int{0},
+		},
+		{
+			PullRequests: [][]github.PullRequest{
+				{
+					{},
+				},
+				{
+					{},
+				},
+				{
+					{},
+				},
+				{
+					{},
+				},
+			},
+			Pages: []int{4, 4, 4, 0},
+		},
+		{
+			PullRequests: [][]github.PullRequest{
+				{
+					{},
+				},
+				{
+					{},
+				},
+				{
+					{},
+					{},
+					{},
+				},
+			},
+			Pages: []int{3, 3, 3, 0},
+		},
+	}
+
+	for _, test := range tests {
+		client, server, mux := initTest()
+		count := 0
+		prCount := 0
+		mux.HandleFunc("/repos/foo/bar/pulls", func(w http.ResponseWriter, r *http.Request) {
+			if r.Method != "GET" {
+				t.Errorf("Unexpected method: %s", r.Method)
+			}
+			if r.URL.Query().Get("page") != strconv.Itoa(count+1) {
+				t.Errorf("Unexpected page: %s", r.URL.Query().Get("page"))
+			}
+			if r.URL.Query().Get("sort") != "desc" {
+				t.Errorf("Unexpected sort: %s", r.URL.Query().Get("sort"))
+			}
+			if r.URL.Query().Get("per_page") != "100" {
+				t.Errorf("Unexpected per_page: %s", r.URL.Query().Get("per_page"))
+			}
+			w.Header().Add("Link",
+				fmt.Sprintf("<https://api.github.com/?page=%d>; rel=\"last\"", test.Pages[count]))
+			w.WriteHeader(http.StatusOK)
+			data, err := json.Marshal(test.PullRequests[count])
+			prCount += len(test.PullRequests[count])
+			if err != nil {
+				t.Errorf("Unexpected error: %v", err)
+			}
+
+			w.Write(data)
+			count++
+		})
+		prs, err := fetchAllPRs(client, "foo", "bar")
+		if err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if len(prs) != prCount {
+			t.Errorf("unexpected output %d vs %d", len(prs), prCount)
+		}
+
+		if count != len(test.PullRequests) {
+			t.Errorf("unexpected number of fetches: %d", count)
+		}
+		server.Close()
+	}
+}
+
+func TestComputeStatus(t *testing.T) {
+	tests := []struct {
+		statusList       []*github.CombinedStatus
+		requiredContexts []string
+		expected         string
+	}{
+		{
+			statusList: []*github.CombinedStatus{
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+			},
+			expected: "success",
+		},
+		{
+			statusList: []*github.CombinedStatus{
+				{State: stringPtr("error"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("pending"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+			},
+			expected: "pending",
+		},
+		{
+			statusList: []*github.CombinedStatus{
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("pending"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+			},
+			expected: "pending",
+		},
+		{
+			statusList: []*github.CombinedStatus{
+				{State: stringPtr("failure"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+			},
+			expected: "failure",
+		},
+		{
+			statusList: []*github.CombinedStatus{
+				{State: stringPtr("failure"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("error"), SHA: stringPtr("abcdef")},
+				{State: stringPtr("success"), SHA: stringPtr("abcdef")},
+			},
+			expected:
"error", + }, + { + statusList: []*github.CombinedStatus{ + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "incomplete", + }, + { + statusList: []*github.CombinedStatus{ + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("pending"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "incomplete", + }, + { + statusList: []*github.CombinedStatus{ + {State: stringPtr("failure"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "incomplete", + }, + { + statusList: []*github.CombinedStatus{ + {State: stringPtr("failure"), SHA: stringPtr("abcdef")}, + {State: stringPtr("error"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "incomplete", + }, + { + statusList: []*github.CombinedStatus{ + { + State: stringPtr("success"), + SHA: stringPtr("abcdef"), + Statuses: []github.RepoStatus{ + {Context: stringPtr("context")}, + }, + }, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "success", + }, + { + statusList: []*github.CombinedStatus{ + { + State: stringPtr("pending"), + SHA: stringPtr("abcdef"), + Statuses: []github.RepoStatus{ + {Context: stringPtr("context")}, + }, + }, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "pending", + }, + { + statusList: []*github.CombinedStatus{ + { + State: stringPtr("error"), + SHA: stringPtr("abcdef"), + Statuses: []github.RepoStatus{ + {Context: stringPtr("context")}, + }, + }, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "error", + }, + { + statusList: []*github.CombinedStatus{ + { + State: stringPtr("failure"), + SHA: stringPtr("abcdef"), + Statuses: []github.RepoStatus{ + {Context: stringPtr("context")}, + }, + }, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + {State: stringPtr("success"), SHA: stringPtr("abcdef")}, + }, + requiredContexts: []string{"context"}, + expected: "failure", + }, + } + + for _, test := range tests { + // ease of use, reduce boilerplate in test cases + if test.requiredContexts == nil { + test.requiredContexts = []string{} + } + status := computeStatus(test.statusList, test.requiredContexts) + if test.expected != status { + t.Errorf("expected: %s, saw %s", test.expected, status) + } + } +} diff --git a/contrib/submit-queue/jenkins/jenkins.go b/contrib/submit-queue/jenkins/jenkins.go new file mode 100644 index 00000000000..a16d7abac21 --- /dev/null +++ b/contrib/submit-queue/jenkins/jenkins.go @@ -0,0 +1,91 @@ +/* +Copyright 2015 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jenkins
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/golang/glog"
+)
+
+type JenkinsClient struct {
+	Host string
+}
+
+type Queue struct {
+	Builds             []Build `json:"builds"`
+	LastCompletedBuild Build   `json:"lastCompletedBuild"`
+	LastStableBuild    Build   `json:"lastStableBuild"`
+}
+
+type Build struct {
+	Number int    `json:"number"`
+	URL    string `json:"url"`
+}
+
+type Job struct {
+	Result    string `json:"result"`
+	ID        string `json:"id"`
+	Timestamp int    `json:"timestamp"`
+}
+
+func (j *JenkinsClient) request(path string) ([]byte, error) {
+	url := j.Host + path
+	glog.V(3).Infof("Hitting: %s", url)
+	res, err := http.Get(url)
+	if err != nil {
+		return nil, err
+	}
+	defer res.Body.Close()
+	return ioutil.ReadAll(res.Body)
+}
+
+func (j *JenkinsClient) GetJob(name string) (*Queue, error) {
+	data, err := j.request("/job/" + name + "/api/json")
+	if err != nil {
+		return nil, err
+	}
+	glog.V(8).Infof("Got data: %s", string(data))
+	q := &Queue{}
+	if err := json.Unmarshal(data, q); err != nil {
+		return nil, err
+	}
+	return q, nil
+}
+
+func (j *JenkinsClient) GetLastCompletedBuild(name string) (*Job, error) {
+	data, err := j.request("/job/" + name + "/lastCompletedBuild/api/json")
+	if err != nil {
+		return nil, err
+	}
+	glog.V(8).Infof("Got data: %s", string(data))
+	job := &Job{}
+	if err := json.Unmarshal(data, job); err != nil {
+		return nil, err
+	}
+	return job, nil
+}
+
+func (j *JenkinsClient) IsBuildStable(name string) (bool, error) {
+	q, err := j.GetLastCompletedBuild(name)
+	if err != nil {
+		return false, err
+	}
+	return q.Result == "SUCCESS", nil
+}
diff --git a/contrib/submit-queue/submit-queue.go b/contrib/submit-queue/submit-queue.go
new file mode 100644
index 00000000000..b0735997a38
--- /dev/null
+++ b/contrib/submit-queue/submit-queue.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+// A simple binary for merging PRs that match a set of criteria
+// Usage:
+//   submit-queue -token=<token> -user-whitelist=<file> --jenkins-host=http://some.host [-min-pr-number=<number>] [-dry-run] [-once]
+//
+// Details:
+/*
+Usage of ./submit-queue:
+  -alsologtostderr=false: log to standard error as well as files
+  -dry-run=false: If true, don't actually merge anything
+  -jenkins-jobs="kubernetes-e2e-gce,kubernetes-e2e-gke-ci,kubernetes-build": Comma separated list of jobs in Jenkins to use for stability testing
+  -log_backtrace_at=:0: when logging hits line file:N, emit a stack trace
+  -log_dir="": If non-empty, write log files in this directory
+  -logtostderr=false: log to standard error instead of files
+  -min-pr-number=0: The minimum PR to start with [default: 0]
+  -once=false: If true, only merge one PR, don't run forever
+  -stderrthreshold=0: logs at or above this threshold go to stderr
+  -token="": The OAuth Token to use for requests.
+  -user-whitelist="": Path to a whitelist file that contains users to auto-merge. Required.
+  -v=0: log level for V logs
+  -vmodule=: comma-separated list of pattern=N settings for file-filtered logging
+*/
+
+import (
+	"bufio"
+	"errors"
+	"flag"
+	"os"
+	"strings"
+
+	"github.com/GoogleCloudPlatform/kubernetes/contrib/submit-queue/github"
+	"github.com/GoogleCloudPlatform/kubernetes/contrib/submit-queue/jenkins"
+
+	"github.com/golang/glog"
+	github_api "github.com/google/go-github/github"
+)
+
+var (
+	token             = flag.String("token", "", "The OAuth Token to use for requests.")
+	minPRNumber       = flag.Int("min-pr-number", 0, "The minimum PR to start with [default: 0]")
+	dryrun            = flag.Bool("dry-run", false, "If true, don't actually merge anything")
+	oneOff            = flag.Bool("once", false, "If true, only merge one PR, don't run forever")
+	jobs              = flag.String("jenkins-jobs", "kubernetes-e2e-gce,kubernetes-e2e-gke-ci,kubernetes-build", "Comma separated list of jobs in Jenkins to use for stability testing")
+	jenkinsHost       = flag.String("jenkins-host", "", "The URL for the jenkins job to watch")
+	userWhitelist     = flag.String("user-whitelist", "", "Path to a whitelist file that contains users to auto-merge. Required.")
+	requiredContexts  = flag.String("required-contexts", "cla/google,Shippable,continuous-integration/travis-ci/pr,Jenkins GCE e2e", "Comma separated list of status contexts required for a PR to be considered ok to merge")
+	whitelistOverride = flag.String("whitelist-override-label", "ok-to-merge", "Github label, if present on a PR it will be merged even if the author isn't in the whitelist")
+)
+
+const (
+	org     = "GoogleCloudPlatform"
+	project = "kubernetes"
+)
+
+// This is called on a potentially mergeable PR
+func runE2ETests(client *github_api.Client, pr *github_api.PullRequest, issue *github_api.Issue) error {
+	// Test if the build is stable in Jenkins
+	jenkinsClient := &jenkins.JenkinsClient{Host: *jenkinsHost}
+	builds := strings.Split(*jobs, ",")
+	for _, build := range builds {
+		glog.V(2).Infof("Checking build stability for %s", build)
+		stable, err := jenkinsClient.IsBuildStable(build)
+		if err != nil {
+			return err
+		}
+		if !stable {
+			glog.Errorf("Build %s isn't stable, skipping!", build)
+			return errors.New("unstable build")
+		}
+	}
+	glog.V(2).Infof("Build is stable.")
+	// Ask for a fresh build
+	glog.V(4).Infof("Asking PR builder to build %d", *pr.Number)
+	body := "@k8s-bot test this [testing build queue, sorry for the noise]"
+	if _, _, err := client.Issues.CreateComment(org, project, *pr.Number, &github_api.IssueComment{Body: &body}); err != nil {
+		return err
+	}
+
+	// Wait for the build to start
+	if err := github.WaitForPending(client, org, project, *pr.Number); err != nil {
+		return err
+	}
+
+	// Wait for the status to go back to 'success'
+	ok, err := github.ValidateStatus(client, org, project, *pr.Number, []string{}, true)
+	if err != nil {
+		return err
+	}
+	if !ok {
+		glog.Infof("Status after build is not 'success', skipping PR %d", *pr.Number)
+		return nil
+	}
+	if !*dryrun {
+		glog.Infof("Merging PR: %d", *pr.Number)
+		mergeBody := "Automatic merge from SubmitQueue"
+		if _, _, err := client.Issues.CreateComment(org, project, *pr.Number, &github_api.IssueComment{Body: &mergeBody}); err != nil {
+			glog.Warningf("Failed to create merge comment: %v", err)
+			return err
+		}
+		_, _, err := client.PullRequests.Merge(org, project, *pr.Number, "Auto commit by PR queue bot")
+		return err
+	}
+	glog.Infof("Skipping actual merge because --dry-run is set")
+	return nil
+}
+
+func loadWhitelist(file string) ([]string, error) {
+	fp, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer fp.Close()
+	scanner := bufio.NewScanner(fp)
+	result := []string{}
+	for scanner.Scan() {
+		result = append(result, scanner.Text())
+	}
+	return result, scanner.Err()
+}
+
+func main() {
+	flag.Parse()
+	if len(*userWhitelist) == 0 {
+		glog.Fatalf("--user-whitelist is required.")
+	}
+	if len(*jenkinsHost) == 0 {
+		glog.Fatalf("--jenkins-host is required.")
+	}
+	client := github.MakeClient(*token)
+
+	users, err := loadWhitelist(*userWhitelist)
+	if err != nil {
+		glog.Fatalf("error loading user whitelist: %v", err)
+	}
+	requiredContexts := strings.Split(*requiredContexts, ",")
+	config := &github.FilterConfig{
+		MinPRNumber:            *minPRNumber,
+		UserWhitelist:          users,
+		RequiredStatusContexts: requiredContexts,
+		WhitelistOverride:      *whitelistOverride,
+	}
+	for {
+		if err := github.ForEachCandidatePRDo(client, org, project, runE2ETests, *oneOff, config); err != nil {
+			glog.Fatalf("Error getting candidate PRs: %v", err)
+		}
+		if *oneOff {
+			break
+		}
+	}
+}
diff --git a/contrib/submit-queue/whitelist.txt b/contrib/submit-queue/whitelist.txt
new file mode 100644
index 00000000000..05ffe27508d
--- /dev/null
+++ 
b/contrib/submit-queue/whitelist.txt @@ -0,0 +1,41 @@ +brendandburns +thockin +mikedanese +a-robinson +saad-ali +lavalamp +smarterclayton +justinsb +satnam6502 +derekwaynecarr +dchen1107 +zmerlynn +erictune +eparis +caesarxuchao +wojtek-t +jlowdermilk +yifan-gu +nikhiljindal +markturansky +pmorie +yujuhong +roberthbailey +vishh +deads2k +bprashanth +cjcullen +liggitt +bgrant0607 +fgrzadkowski +jayunit100 +mbforbes +ArtfulCoder +piosz +davidopp +ixdy +marekbiskup +gmarek +ghodss +krousey +quinton-hoole From b36ae6efba99f13b173d5ac42519cb2f385be6ec Mon Sep 17 00:00:00 2001 From: Vishnu Kannan Date: Wed, 29 Jul 2015 16:13:33 -0700 Subject: [PATCH 35/49] Upgrading heapster to v0.17.0 --- .../cluster-monitoring/google/heapster-controller.yaml | 10 +++++----- .../googleinfluxdb/heapster-controller-combined.yaml | 10 +++++----- .../influxdb/heapster-controller.yaml | 10 +++++----- .../standalone/heapster-controller.yaml | 10 +++++----- 4 files changed, 20 insertions(+), 20 deletions(-) diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index dfa8495a971..406cdfbcdd5 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -1,26 +1,26 @@ apiVersion: v1 kind: ReplicationController metadata: - name: monitoring-heapster-v5 + name: monitoring-heapster-v6 namespace: kube-system labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: heapster - version: v5 + version: v6 template: metadata: labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/heapster:v0.16.0 + - image: gcr.io/google_containers/heapster:v0.17.0 name: heapster resources: limits: diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index fcddb56ce41..b9123f157e7 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -1,26 +1,26 @@ apiVersion: v1 kind: ReplicationController metadata: - name: monitoring-heapster-v5 + name: monitoring-heapster-v6 namespace: kube-system labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: heapster - version: v5 + version: v6 template: metadata: labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/heapster:v0.16.0 + - image: gcr.io/google_containers/heapster:v0.17.0 name: heapster resources: limits: diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index e58d7f9b3c2..320561290dd 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -1,26 +1,26 @@ apiVersion: v1 kind: ReplicationController metadata: - name: monitoring-heapster-v5 + name: monitoring-heapster-v6 namespace: kube-system labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: heapster - version: v5 + 
version: v6 template: metadata: labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/heapster:v0.16.0 + - image: gcr.io/google_containers/heapster:v0.17.0 name: heapster resources: limits: diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index 584402df888..8a9598e5ddc 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -1,26 +1,26 @@ apiVersion: v1 kind: ReplicationController metadata: - name: monitoring-heapster-v5 + name: monitoring-heapster-v6 namespace: kube-system labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: replicas: 1 selector: k8s-app: heapster - version: v5 + version: v6 template: metadata: labels: k8s-app: heapster - version: v5 + version: v6 kubernetes.io/cluster-service: "true" spec: containers: - - image: gcr.io/google_containers/heapster:v0.16.0 + - image: gcr.io/google_containers/heapster:v0.17.0 name: heapster resources: limits: From 1b84fb7d74094c3f1170d78104766440fbad69ee Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Mon, 6 Jul 2015 14:37:46 -0700 Subject: [PATCH 36/49] make testclient threadsafe by guarding internal state with accessors --- pkg/client/testclient/fake_endpoints.go | 4 +- pkg/client/testclient/fake_events.go | 6 +-- pkg/client/testclient/fake_limit_ranges.go | 2 +- pkg/client/testclient/fake_namespaces.go | 6 +-- pkg/client/testclient/fake_nodes.go | 4 +- .../fake_persistent_volume_claims.go | 4 +- .../testclient/fake_persistent_volumes.go | 4 +- pkg/client/testclient/fake_pod_templates.go | 4 +- pkg/client/testclient/fake_pods.go | 6 +-- .../fake_replication_controllers.go | 2 +- pkg/client/testclient/fake_resource_quotas.go | 2 +- pkg/client/testclient/fake_secrets.go | 4 +- .../testclient/fake_service_accounts.go | 4 +- pkg/client/testclient/fake_services.go | 4 +- pkg/client/testclient/testclient.go | 53 ++++++++++++++++--- .../nodecontroller/nodecontroller_test.go | 2 +- .../servicecontroller_test.go | 13 ++--- .../replication_controller_test.go | 2 +- pkg/kubectl/rolling_updater_test.go | 6 +-- pkg/kubectl/scale_test.go | 22 ++++---- pkg/kubectl/stop_test.go | 12 +++-- pkg/kubelet/kubelet_test.go | 25 +++++---- pkg/kubelet/status_manager_test.go | 2 +- pkg/namespace/namespace_controller_test.go | 19 +++---- .../resource_quota_controller_test.go | 7 +-- .../serviceaccounts_controller_test.go | 7 +-- pkg/serviceaccount/tokens_controller_test.go | 9 ++-- .../namespace/autoprovision/admission_test.go | 15 +++--- 28 files changed, 151 insertions(+), 99 deletions(-) diff --git a/pkg/client/testclient/fake_endpoints.go b/pkg/client/testclient/fake_endpoints.go index 5867f6e23f2..79a0d359eac 100644 --- a/pkg/client/testclient/fake_endpoints.go +++ b/pkg/client/testclient/fake_endpoints.go @@ -51,8 +51,8 @@ func (c *FakeEndpoints) Delete(name string) error { } func (c *FakeEndpoints) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-endpoints", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-endpoints", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } func (c *FakeEndpoints) Update(endpoints *api.Endpoints) 
(*api.Endpoints, error) { diff --git a/pkg/client/testclient/fake_events.go b/pkg/client/testclient/fake_events.go index a5ee01d4440..4cc28a2ec22 100644 --- a/pkg/client/testclient/fake_events.go +++ b/pkg/client/testclient/fake_events.go @@ -56,8 +56,8 @@ func (c *FakeEvents) Get(id string) (*api.Event, error) { // Watch starts watching for events matching the given selectors. func (c *FakeEvents) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-events", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-events", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } // Search returns a list of events matching the specified object. @@ -72,6 +72,6 @@ func (c *FakeEvents) Delete(name string) error { } func (c *FakeEvents) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "get-field-selector"}) + c.Fake.Invokes(FakeAction{Action: "get-field-selector"}, nil) return fields.Everything() } diff --git a/pkg/client/testclient/fake_limit_ranges.go b/pkg/client/testclient/fake_limit_ranges.go index 1a0014110cd..36ec5c2ce59 100644 --- a/pkg/client/testclient/fake_limit_ranges.go +++ b/pkg/client/testclient/fake_limit_ranges.go @@ -56,6 +56,6 @@ func (c *FakeLimitRanges) Update(limitRange *api.LimitRange) (*api.LimitRange, e } func (c *FakeLimitRanges) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-limitRange", Value: resourceVersion}) + c.Fake.Invokes(FakeAction{Action: "watch-limitRange", Value: resourceVersion}, nil) return c.Fake.Watch, nil } diff --git a/pkg/client/testclient/fake_namespaces.go b/pkg/client/testclient/fake_namespaces.go index 9132fbc1900..1679e2f532b 100644 --- a/pkg/client/testclient/fake_namespaces.go +++ b/pkg/client/testclient/fake_namespaces.go @@ -45,8 +45,8 @@ func (c *FakeNamespaces) Delete(name string) error { } func (c *FakeNamespaces) Create(namespace *api.Namespace) (*api.Namespace, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "create-namespace"}) - return &api.Namespace{}, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "create-namespace"}, nil) + return &api.Namespace{}, c.Fake.Err() } func (c *FakeNamespaces) Update(namespace *api.Namespace) (*api.Namespace, error) { @@ -55,7 +55,7 @@ func (c *FakeNamespaces) Update(namespace *api.Namespace) (*api.Namespace, error } func (c *FakeNamespaces) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-namespaces", Value: resourceVersion}) + c.Fake.Invokes(FakeAction{Action: "watch-namespaces", Value: resourceVersion}, nil) return c.Fake.Watch, nil } diff --git a/pkg/client/testclient/fake_nodes.go b/pkg/client/testclient/fake_nodes.go index 522b61d803b..99e6d691b9d 100644 --- a/pkg/client/testclient/fake_nodes.go +++ b/pkg/client/testclient/fake_nodes.go @@ -60,6 +60,6 @@ func (c *FakeNodes) UpdateStatus(minion *api.Node) (*api.Node, error) { } func (c *FakeNodes) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-nodes", 
Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-nodes", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/fake_persistent_volume_claims.go b/pkg/client/testclient/fake_persistent_volume_claims.go index 750be6e7eb0..860679723ed 100644 --- a/pkg/client/testclient/fake_persistent_volume_claims.go +++ b/pkg/client/testclient/fake_persistent_volume_claims.go @@ -59,6 +59,6 @@ func (c *FakePersistentVolumeClaims) UpdateStatus(claim *api.PersistentVolumeCla } func (c *FakePersistentVolumeClaims) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-persistentVolumeClaims", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-persistentVolumeClaims", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/fake_persistent_volumes.go b/pkg/client/testclient/fake_persistent_volumes.go index f0bdd00c230..414fccb3a91 100644 --- a/pkg/client/testclient/fake_persistent_volumes.go +++ b/pkg/client/testclient/fake_persistent_volumes.go @@ -59,6 +59,6 @@ func (c *FakePersistentVolumes) UpdateStatus(pv *api.PersistentVolume) (*api.Per } func (c *FakePersistentVolumes) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-persistentVolumes", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-persistentVolumes", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/fake_pod_templates.go b/pkg/client/testclient/fake_pod_templates.go index c7913e24a3c..7d9f82ee4ff 100644 --- a/pkg/client/testclient/fake_pod_templates.go +++ b/pkg/client/testclient/fake_pod_templates.go @@ -56,6 +56,6 @@ func (c *FakePodTemplates) Update(pod *api.PodTemplate) (*api.PodTemplate, error } func (c *FakePodTemplates) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-podTemplates", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-podTemplates", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/fake_pods.go b/pkg/client/testclient/fake_pods.go index b73b1fd4bab..b8ec2d7d8ad 100644 --- a/pkg/client/testclient/fake_pods.go +++ b/pkg/client/testclient/fake_pods.go @@ -56,12 +56,12 @@ func (c *FakePods) Update(pod *api.Pod) (*api.Pod, error) { } func (c *FakePods) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-pods", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-pods", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } func (c *FakePods) Bind(bind *api.Binding) error { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "bind-pod", Value: bind.Name}) + c.Fake.Invokes(FakeAction{Action: "bind-pod", Value: bind.Name}, nil) return nil } diff --git a/pkg/client/testclient/fake_replication_controllers.go b/pkg/client/testclient/fake_replication_controllers.go index 6a6deaf9a14..b64bb0aa16d 
100644 --- a/pkg/client/testclient/fake_replication_controllers.go +++ b/pkg/client/testclient/fake_replication_controllers.go @@ -65,6 +65,6 @@ func (c *FakeReplicationControllers) Delete(name string) error { } func (c *FakeReplicationControllers) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: WatchControllerAction, Value: resourceVersion}) + c.Fake.Invokes(FakeAction{Action: WatchControllerAction, Value: resourceVersion}, nil) return c.Fake.Watch, nil } diff --git a/pkg/client/testclient/fake_resource_quotas.go b/pkg/client/testclient/fake_resource_quotas.go index 51132a3f0c3..9a5763755f5 100644 --- a/pkg/client/testclient/fake_resource_quotas.go +++ b/pkg/client/testclient/fake_resource_quotas.go @@ -61,6 +61,6 @@ func (c *FakeResourceQuotas) UpdateStatus(resourceQuota *api.ResourceQuota) (*ap } func (c *FakeResourceQuotas) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-resourceQuota", Value: resourceVersion}) + c.Fake.Invokes(FakeAction{Action: "watch-resourceQuota", Value: resourceVersion}, nil) return c.Fake.Watch, nil } diff --git a/pkg/client/testclient/fake_secrets.go b/pkg/client/testclient/fake_secrets.go index a4ffb90043e..d08692dd5d8 100644 --- a/pkg/client/testclient/fake_secrets.go +++ b/pkg/client/testclient/fake_secrets.go @@ -56,6 +56,6 @@ func (c *FakeSecrets) Delete(name string) error { } func (c *FakeSecrets) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-secrets", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-secrets", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/fake_service_accounts.go b/pkg/client/testclient/fake_service_accounts.go index 9185c97284c..e57aeddadce 100644 --- a/pkg/client/testclient/fake_service_accounts.go +++ b/pkg/client/testclient/fake_service_accounts.go @@ -56,6 +56,6 @@ func (c *FakeServiceAccounts) Delete(name string) error { } func (c *FakeServiceAccounts) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-serviceAccounts", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-serviceAccounts", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/fake_services.go b/pkg/client/testclient/fake_services.go index b8b02eaa97d..73c7d101895 100644 --- a/pkg/client/testclient/fake_services.go +++ b/pkg/client/testclient/fake_services.go @@ -56,6 +56,6 @@ func (c *FakeServices) Delete(name string) error { } func (c *FakeServices) Watch(label labels.Selector, field fields.Selector, resourceVersion string) (watch.Interface, error) { - c.Fake.Actions = append(c.Fake.Actions, FakeAction{Action: "watch-services", Value: resourceVersion}) - return c.Fake.Watch, c.Fake.Err + c.Fake.Invokes(FakeAction{Action: "watch-services", Value: resourceVersion}, nil) + return c.Fake.Watch, c.Fake.Err() } diff --git a/pkg/client/testclient/testclient.go b/pkg/client/testclient/testclient.go index b0c08d18637..2d962ad86c2 100644 --- a/pkg/client/testclient/testclient.go +++ 
b/pkg/client/testclient/testclient.go
@@ -17,6 +17,8 @@ limitations under the License.
 package testclient
 
 import (
+	"sync"
+
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/registered"
@@ -48,10 +50,11 @@ type ReactionFunc func(FakeAction) (runtime.Object, error)
 // Fake implements client.Interface. Meant to be embedded into a struct to get a default
 // implementation. This makes faking out just the method you want to test easier.
 type Fake struct {
-	Actions []FakeAction
-	Watch   watch.Interface
-	Err     error
+	sync.RWMutex
+	actions []FakeAction
+	err     error
 
+	Watch watch.Interface
 	// ReactFn is an optional function that will be invoked with the provided action
 	// and return a response. It can implement scenario specific behavior. The type
 	// of object returned must match the expected type from the caller (even if nil).
@@ -61,11 +64,47 @@ type Fake struct {
 // Invokes records the provided FakeAction and then invokes the ReactFn (if provided).
 // obj is expected to be of the same type a normal call would return.
 func (c *Fake) Invokes(action FakeAction, obj runtime.Object) (runtime.Object, error) {
-	c.Actions = append(c.Actions, action)
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = append(c.actions, action)
 	if c.ReactFn != nil {
 		return c.ReactFn(action)
 	}
-	return obj, c.Err
+	return obj, c.err
+}
+
+// ClearActions clears the history of actions called on the fake client
+func (c *Fake) ClearActions() {
+	c.Lock()
+	defer c.Unlock()
+
+	c.actions = make([]FakeAction, 0)
+}
+
+// Actions returns a chronologically ordered slice of fake actions called on the fake client
+func (c *Fake) Actions() []FakeAction {
+	c.RLock()
+	defer c.RUnlock()
+	fa := make([]FakeAction, len(c.actions))
+	copy(fa, c.actions)
+	return fa
+}
+
+// SetErr sets the error to return for client calls
+func (c *Fake) SetErr(err error) {
+	c.Lock()
+	defer c.Unlock()
+
+	c.err = err
+}
+
+// Err returns the client error to return, or nil if none is set
+func (c *Fake) Err() error {
+	c.RLock()
+	defer c.RUnlock()
+
+	return c.err
 }
 
 func (c *Fake) LimitRanges(namespace string) client.LimitRangeInterface {
@@ -125,13 +164,13 @@ func (c *Fake) Namespaces() client.NamespaceInterface {
 }
 
 func (c *Fake) ServerVersion() (*version.Info, error) {
-	c.Actions = append(c.Actions, FakeAction{Action: "get-version", Value: nil})
+	c.Invokes(FakeAction{Action: "get-version", Value: nil}, nil)
 	versionInfo := version.Get()
 	return &versionInfo, nil
 }
 
 func (c *Fake) ServerAPIVersions() (*api.APIVersions, error) {
-	c.Actions = append(c.Actions, FakeAction{Action: "get-apiversions", Value: nil})
+	c.Invokes(FakeAction{Action: "get-apiversions", Value: nil}, nil)
 	return &api.APIVersions{Versions: registered.RegisteredVersions}, nil
 }
 
diff --git a/pkg/cloudprovider/nodecontroller/nodecontroller_test.go b/pkg/cloudprovider/nodecontroller/nodecontroller_test.go
index 88aa2f390ab..11bfeb263ae 100644
--- a/pkg/cloudprovider/nodecontroller/nodecontroller_test.go
+++ b/pkg/cloudprovider/nodecontroller/nodecontroller_test.go
@@ -342,7 +342,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
 		podEvictor.TryEvict(func(nodeName string) { nodeController.deletePods(nodeName) })
 		podEvicted := false
-		for _, action := range item.fakeNodeHandler.Actions {
+		for _, action := range item.fakeNodeHandler.Actions() {
 			if action.Action == "delete-pod" {
 				podEvicted = true
 			}
diff --git a/pkg/cloudprovider/servicecontroller/servicecontroller_test.go
b/pkg/cloudprovider/servicecontroller/servicecontroller_test.go index d62b3a47162..032ffefb2e4 100644 --- a/pkg/cloudprovider/servicecontroller/servicecontroller_test.go +++ b/pkg/cloudprovider/servicecontroller/servicecontroller_test.go @@ -93,20 +93,21 @@ func TestCreateExternalLoadBalancer(t *testing.T) { client := &testclient.Fake{} controller := New(cloud, client, "test-cluster") controller.init() - cloud.Calls = nil // ignore any cloud calls made in init() - client.Actions = nil // ignore any client calls made in init() + cloud.Calls = nil // ignore any cloud calls made in init() + client.ClearActions() // ignore any client calls made in init() err, _ := controller.createLoadBalancerIfNeeded(types.NamespacedName{"foo", "bar"}, item.service, nil) if !item.expectErr && err != nil { t.Errorf("unexpected error: %v", err) } else if item.expectErr && err == nil { t.Errorf("expected error creating %v, got nil", item.service) } + actions := client.Actions() if !item.expectCreateAttempt { if len(cloud.Calls) > 0 { t.Errorf("unexpected cloud provider calls: %v", cloud.Calls) } - if len(client.Actions) > 0 { - t.Errorf("unexpected client actions: %v", client.Actions) + if len(actions) > 0 { + t.Errorf("unexpected client actions: %v", actions) } } else { if len(cloud.Balancers) != 1 { @@ -117,13 +118,13 @@ func TestCreateExternalLoadBalancer(t *testing.T) { t.Errorf("created load balancer has incorrect parameters: %v", cloud.Balancers[0]) } actionFound := false - for _, action := range client.Actions { + for _, action := range actions { if action.Action == "update-service" { actionFound = true } } if !actionFound { - t.Errorf("expected updated service to be sent to client, got these actions instead: %v", client.Actions) + t.Errorf("expected updated service to be sent to client, got these actions instead: %v", actions) } } } diff --git a/pkg/controller/replication/replication_controller_test.go b/pkg/controller/replication/replication_controller_test.go index 292c116515e..497f660dcac 100644 --- a/pkg/controller/replication/replication_controller_test.go +++ b/pkg/controller/replication/replication_controller_test.go @@ -693,7 +693,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) { numReplicas := 10 updateReplicaCount(fakeRCClient, *rc, numReplicas) updates, gets := 0, 0 - for _, a := range fakeClient.Actions { + for _, a := range fakeClient.Actions() { switch a.Action { case testclient.GetControllerAction: gets++ diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 8bd497ff000..e65245a5833 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -74,19 +74,19 @@ func (c *fakeRc) Get(name string) (*api.ReplicationController, error) { if len(c.responses) == 0 { return nil, fmt.Errorf("Unexpected Action: %s", action) } - c.Fake.Actions = append(c.Fake.Actions, action) + c.Fake.Invokes(action, nil) result := c.responses[0] c.responses = c.responses[1:] return result.controller, result.err } func (c *fakeRc) Create(controller *api.ReplicationController) (*api.ReplicationController, error) { - c.Fake.Actions = append(c.Fake.Actions, testclient.FakeAction{Action: "create-controller", Value: controller.ObjectMeta.Name}) + c.Fake.Invokes(testclient.FakeAction{Action: "create-controller", Value: controller.ObjectMeta.Name}, nil) return controller, nil } func (c *fakeRc) Update(controller *api.ReplicationController) (*api.ReplicationController, error) { - c.Fake.Actions = append(c.Fake.Actions, 
testclient.FakeAction{Action: "update-controller", Value: controller.ObjectMeta.Name}) + c.Fake.Invokes(testclient.FakeAction{Action: "update-controller", Value: controller.ObjectMeta.Name}, nil) return controller, nil } diff --git a/pkg/kubectl/scale_test.go b/pkg/kubectl/scale_test.go index 716fd6b6e62..8c16efdb153 100644 --- a/pkg/kubectl/scale_test.go +++ b/pkg/kubectl/scale_test.go @@ -73,14 +73,15 @@ func TestReplicationControllerScale(t *testing.T) { name := "foo" scaler.Scale("default", name, count, &preconditions, nil, nil) - if len(fake.Actions) != 2 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions) + actions := fake.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) } - if fake.Actions[0].Action != "get-replicationController" || fake.Actions[0].Value != name { - t.Errorf("unexpected action: %v, expected get-replicationController %s", fake.Actions[0], name) + if actions[0].Action != "get-replicationController" || actions[0].Value != name { + t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) } - if fake.Actions[1].Action != "update-replicationController" || fake.Actions[1].Value.(*api.ReplicationController).Spec.Replicas != int(count) { - t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", fake.Actions[1], count) + if actions[1].Action != "update-replicationController" || actions[1].Value.(*api.ReplicationController).Spec.Replicas != int(count) { + t.Errorf("unexpected action %v, expected update-replicationController with replicas = %d", actions[1], count) } } @@ -96,11 +97,12 @@ func TestReplicationControllerScaleFailsPreconditions(t *testing.T) { name := "foo" scaler.Scale("default", name, count, &preconditions, nil, nil) - if len(fake.Actions) != 1 { - t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions) + actions := fake.Actions() + if len(actions) != 1 { + t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", actions) } - if fake.Actions[0].Action != "get-replicationController" || fake.Actions[0].Value != name { - t.Errorf("unexpected action: %v, expected get-replicationController %s", fake.Actions[0], name) + if actions[0].Action != "get-replicationController" || actions[0].Value != name { + t.Errorf("unexpected action: %v, expected get-replicationController %s", actions[0], name) } } diff --git a/pkg/kubectl/stop_test.go b/pkg/kubectl/stop_test.go index 68fded4c902..379e46cfd6b 100644 --- a/pkg/kubectl/stop_test.go +++ b/pkg/kubectl/stop_test.go @@ -47,12 +47,13 @@ func TestReplicationControllerStop(t *testing.T) { if s != expected { t.Errorf("expected %s, got %s", expected, s) } - if len(fake.Actions) != 7 { + actions := fake.Actions() + if len(actions) != 7 { t.Errorf("unexpected actions: %v, expected 6 actions (get, list, get, update, get, get, delete)", fake.Actions) } for i, action := range []string{"get", "list", "get", "update", "get", "get", "delete"} { - if fake.Actions[i].Action != action+"-replicationController" { - t.Errorf("unexpected action: %+v, expected %s-replicationController", fake.Actions[i], action) + if actions[i].Action != action+"-replicationController" { + t.Errorf("unexpected action: %+v, expected %s-replicationController", actions[i], action) } } } @@ -159,10 +160,11 @@ func TestSimpleStop(t *testing.T) { t.Errorf("unexpected return: %s (%s)", s, test.test) } } - if len(test.actions) != len(fake.Actions) { + actions := 
fake.Actions() + if len(test.actions) != len(actions) { t.Errorf("unexpected actions: %v; expected %v (%s)", fake.Actions, test.actions, test.test) } - for i, action := range fake.Actions { + for i, action := range actions { testAction := test.actions[i] if action.Action != testAction { t.Errorf("unexpected action: %v; expected %v (%s)", action, testAction, test.test) diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go index a9713e6bb1e..a87a3e43932 100644 --- a/pkg/kubelet/kubelet_test.go +++ b/pkg/kubelet/kubelet_test.go @@ -2322,10 +2322,11 @@ func TestUpdateNewNodeStatus(t *testing.T) { if err := kubelet.updateNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } - if len(kubeClient.Actions) != 2 || kubeClient.Actions[1].Action != "update-status-node" { - t.Fatalf("unexpected actions: %v", kubeClient.Actions) + actions := kubeClient.Actions() + if len(actions) != 2 || actions[1].Action != "update-status-node" { + t.Fatalf("unexpected actions: %v", actions) } - updatedNode, ok := kubeClient.Actions[1].Value.(*api.Node) + updatedNode, ok := actions[1].Value.(*api.Node) if !ok { t.Errorf("unexpected object type") } @@ -2419,10 +2420,11 @@ func TestUpdateExistingNodeStatus(t *testing.T) { if err := kubelet.updateNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } - if len(kubeClient.Actions) != 2 { - t.Errorf("unexpected actions: %v", kubeClient.Actions) + actions := kubeClient.Actions() + if len(actions) != 2 { + t.Errorf("unexpected actions: %v", actions) } - updatedNode, ok := kubeClient.Actions[1].Value.(*api.Node) + updatedNode, ok := actions[1].Value.(*api.Node) if !ok { t.Errorf("unexpected object type") } @@ -2506,10 +2508,11 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) { if err := kubelet.updateNodeStatus(); err != nil { t.Errorf("unexpected error: %v", err) } - if len(kubeClient.Actions) != 2 || kubeClient.Actions[1].Action != "update-status-node" { - t.Fatalf("unexpected actions: %v", kubeClient.Actions) + actions := kubeClient.Actions() + if len(actions) != 2 || actions[1].Action != "update-status-node" { + t.Fatalf("unexpected actions: %v", actions) } - updatedNode, ok := kubeClient.Actions[1].Value.(*api.Node) + updatedNode, ok := actions[1].Value.(*api.Node) if !ok { t.Errorf("unexpected object type") } @@ -2536,8 +2539,8 @@ func TestUpdateNodeStatusError(t *testing.T) { if err := kubelet.updateNodeStatus(); err == nil { t.Errorf("unexpected non error: %v", err) } - if len(testKubelet.fakeKubeClient.Actions) != nodeStatusUpdateRetry { - t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions) + if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry { + t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions()) } } diff --git a/pkg/kubelet/status_manager_test.go b/pkg/kubelet/status_manager_test.go index fc473aa8032..46fabb2842a 100644 --- a/pkg/kubelet/status_manager_test.go +++ b/pkg/kubelet/status_manager_test.go @@ -53,7 +53,7 @@ func getRandomPodStatus() api.PodStatus { } func verifyActions(t *testing.T, kubeClient client.Interface, expectedActions []string) { - actions := kubeClient.(*testclient.Fake).Actions + actions := kubeClient.(*testclient.Fake).Actions() if len(actions) != len(expectedActions) { t.Errorf("unexpected actions, got: %s expected: %s", actions, expectedActions) return diff --git a/pkg/namespace/namespace_controller_test.go b/pkg/namespace/namespace_controller_test.go index 0d025c96a86..0dd480d23a1 100644 --- 
a/pkg/namespace/namespace_controller_test.go +++ b/pkg/namespace/namespace_controller_test.go @@ -53,13 +53,14 @@ func TestFinalize(t *testing.T) { }, } finalize(mockClient, testNamespace) - if len(mockClient.Actions) != 1 { - t.Errorf("Expected 1 mock client action, but got %v", len(mockClient.Actions)) + actions := mockClient.Actions() + if len(actions) != 1 { + t.Errorf("Expected 1 mock client action, but got %v", len(actions)) } - if mockClient.Actions[0].Action != "finalize-namespace" { - t.Errorf("Expected finalize-namespace action %v", mockClient.Actions[0].Action) + if actions[0].Action != "finalize-namespace" { + t.Errorf("Expected finalize-namespace action %v", actions[0].Action) } - finalizers := mockClient.Actions[0].Value.(*api.Namespace).Spec.Finalizers + finalizers := actions[0].Value.(*api.Namespace).Spec.Finalizers if len(finalizers) != 1 { t.Errorf("There should be a single finalizer remaining") } @@ -100,8 +101,8 @@ func TestSyncNamespaceThatIsTerminating(t *testing.T) { "finalize-namespace", "delete-namespace") actionSet := util.NewStringSet() - for i := range mockClient.Actions { - actionSet.Insert(mockClient.Actions[i].Action) + for _, action := range mockClient.Actions() { + actionSet.Insert(action.Action) } if !actionSet.HasAll(expectedActionSet.List()...) { t.Errorf("Expected actions: %v, but got: %v", expectedActionSet, actionSet) @@ -127,8 +128,8 @@ func TestSyncNamespaceThatIsActive(t *testing.T) { t.Errorf("Unexpected error when synching namespace %v", err) } actionSet := util.NewStringSet() - for i := range mockClient.Actions { - actionSet.Insert(mockClient.Actions[i].Action) + for _, action := range mockClient.Actions() { + actionSet.Insert(action.Action) } if len(actionSet) != 0 { t.Errorf("Expected no action from controller, but got: %v", actionSet) diff --git a/pkg/resourcequota/resource_quota_controller_test.go b/pkg/resourcequota/resource_quota_controller_test.go index a1414ad8acf..14b0ff5d5c0 100644 --- a/pkg/resourcequota/resource_quota_controller_test.go +++ b/pkg/resourcequota/resource_quota_controller_test.go @@ -158,7 +158,7 @@ func TestSyncResourceQuota(t *testing.T) { t.Fatalf("Unexpected error %v", err) } - usage := kubeClient.Actions[1].Value.(*api.ResourceQuota) + usage := kubeClient.Actions()[1].Value.(*api.ResourceQuota) // ensure hard and used limits are what we expected for k, v := range expectedUsage.Status.Hard { @@ -216,7 +216,7 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) { t.Fatalf("Unexpected error %v", err) } - usage := kubeClient.Actions[1].Value.(*api.ResourceQuota) + usage := kubeClient.Actions()[1].Value.(*api.ResourceQuota) // ensure hard and used limits are what we expected for k, v := range expectedUsage.Status.Hard { @@ -263,7 +263,8 @@ func TestSyncResourceQuotaNoChange(t *testing.T) { t.Fatalf("Unexpected error %v", err) } - if len(kubeClient.Actions) != 1 && kubeClient.Actions[0].Action != "list-pods" { + actions := kubeClient.Actions() + if len(actions) != 1 && actions[0].Action != "list-pods" { t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions) } } diff --git a/pkg/serviceaccount/serviceaccounts_controller_test.go b/pkg/serviceaccount/serviceaccounts_controller_test.go index 4d225eaabc4..924656d61bd 100644 --- a/pkg/serviceaccount/serviceaccounts_controller_test.go +++ b/pkg/serviceaccount/serviceaccounts_controller_test.go @@ -192,12 +192,13 @@ func TestServiceAccountCreation(t *testing.T) { controller.serviceAccountDeleted(tc.DeletedServiceAccount) 
} - if len(tc.ExpectCreatedServiceAccounts) != len(client.Actions) { - t.Errorf("%s: Expected to create accounts %#v. Actual actions were: %#v", k, tc.ExpectCreatedServiceAccounts, client.Actions) + actions := client.Actions() + if len(tc.ExpectCreatedServiceAccounts) != len(actions) { + t.Errorf("%s: Expected to create accounts %#v. Actual actions were: %#v", k, tc.ExpectCreatedServiceAccounts, actions) continue } for i, expectedName := range tc.ExpectCreatedServiceAccounts { - action := client.Actions[i] + action := actions[i] if action.Action != "create-serviceaccount" { t.Errorf("%s: Unexpected action %s", k, action.Action) break diff --git a/pkg/serviceaccount/tokens_controller_test.go b/pkg/serviceaccount/tokens_controller_test.go index 7b78244c103..8c8ba8fe5bc 100644 --- a/pkg/serviceaccount/tokens_controller_test.go +++ b/pkg/serviceaccount/tokens_controller_test.go @@ -462,9 +462,10 @@ func TestTokenCreation(t *testing.T) { controller.secretDeleted(tc.DeletedSecret) } - for i, action := range client.Actions { + actions := client.Actions() + for i, action := range actions { if len(tc.ExpectedActions) < i+1 { - t.Errorf("%s: %d unexpected actions: %+v", k, len(client.Actions)-len(tc.ExpectedActions), client.Actions[i:]) + t.Errorf("%s: %d unexpected actions: %+v", k, len(actions)-len(tc.ExpectedActions), actions[i:]) break } @@ -479,8 +480,8 @@ func TestTokenCreation(t *testing.T) { } } - if len(tc.ExpectedActions) > len(client.Actions) { - t.Errorf("%s: %d additional expected actions:%+v", k, len(tc.ExpectedActions)-len(client.Actions), tc.ExpectedActions[len(client.Actions):]) + if len(tc.ExpectedActions) > len(actions) { + t.Errorf("%s: %d additional expected actions:%+v", k, len(tc.ExpectedActions)-len(actions), tc.ExpectedActions[len(actions):]) } } } diff --git a/plugin/pkg/admission/namespace/autoprovision/admission_test.go b/plugin/pkg/admission/namespace/autoprovision/admission_test.go index af2e35f8005..97e4dca1532 100644 --- a/plugin/pkg/admission/namespace/autoprovision/admission_test.go +++ b/plugin/pkg/admission/namespace/autoprovision/admission_test.go @@ -45,10 +45,11 @@ func TestAdmission(t *testing.T) { if err != nil { t.Errorf("Unexpected error returned from admission handler") } - if len(mockClient.Actions) != 1 { + actions := mockClient.Actions() + if len(actions) != 1 { t.Errorf("Expected a create-namespace request") } - if mockClient.Actions[0].Action != "create-namespace" { + if actions[0].Action != "create-namespace" { t.Errorf("Expected a create-namespace request to be made via the client") } } @@ -76,7 +77,7 @@ func TestAdmissionNamespaceExists(t *testing.T) { if err != nil { t.Errorf("Unexpected error returned from admission handler") } - if len(mockClient.Actions) != 0 { + if len(mockClient.Actions()) != 0 { t.Errorf("No client request should have been made") } } @@ -97,7 +98,7 @@ func TestIgnoreAdmission(t *testing.T) { if err != nil { t.Errorf("Unexpected error returned from admission handler") } - if len(mockClient.Actions) != 0 { + if len(mockClient.Actions()) != 0 { t.Errorf("No client request should have been made") } } @@ -105,9 +106,9 @@ func TestIgnoreAdmission(t *testing.T) { // TestAdmissionNamespaceExistsUnknownToHandler func TestAdmissionNamespaceExistsUnknownToHandler(t *testing.T) { namespace := "test" - mockClient := &testclient.Fake{ - Err: errors.NewAlreadyExists("namespaces", namespace), - } + mockClient := &testclient.Fake{} + mockClient.SetErr(errors.NewAlreadyExists("namespaces", namespace)) + store := 
cache.NewStore(cache.MetaNamespaceKeyFunc) handler := &provision{ client: mockClient, From ffd38e376f3980adff0b0143ce8d902fa1673b73 Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Wed, 29 Jul 2015 18:26:21 -0700 Subject: [PATCH 37/49] Revert "Fix file extension in documentation " From 01c3c0f4e1eb27965629635a42b0e85625845b15 Mon Sep 17 00:00:00 2001 From: gmarek Date: Fri, 24 Jul 2015 15:46:38 +0200 Subject: [PATCH 38/49] Move addon_update e2e test to framework --- test/e2e/addon_update.go | 61 +++++++++++++--------------------------- 1 file changed, 20 insertions(+), 41 deletions(-) diff --git a/test/e2e/addon_update.go b/test/e2e/addon_update.go index 349fbbfb4af..1d1a092a004 100644 --- a/test/e2e/addon_update.go +++ b/test/e2e/addon_update.go @@ -192,28 +192,21 @@ var _ = Describe("Addon update", func() { var dir string var sshClient *ssh.Client - var c *client.Client - var namespace *api.Namespace + f := NewFramework("addon-update-test") BeforeEach(func() { // This test requires: - // - SSH - // - master access + // - SSH master access // ... so the provider check should be identical to the intersection of // providers that provide those capabilities. if !providerIs("gce") { return } + var err error - c, err = loadClient() - Expect(err).NotTo(HaveOccurred()) - sshClient, err = getMasterSSHClient() Expect(err).NotTo(HaveOccurred()) - namespace, err = createTestingNS("addon-update-test", c) - Expect(err).NotTo(HaveOccurred()) - // Reduce the addon update intervals so that we have faster response // to changes in the addon directory. // do not use "service" command because it clears the environment variables @@ -221,25 +214,11 @@ var _ = Describe("Addon update", func() { }) AfterEach(func() { - // This test requires: - // - SSH - // - master access - // ... so the provider check should be identical to the intersection of - // providers that provide those capabilities. - if !providerIs("gce") { - return - } if sshClient != nil { // restart addon_update with the default options sshExec(sshClient, "sudo /etc/init.d/kube-addons restart") sshClient.Close() } - if err := c.Namespaces().Delete(namespace.Name); err != nil { - Failf("Couldn't delete ns %q: %s", namespace, err) - } - // Paranoia-- prevent reuse! - namespace = nil - c = nil }) // WARNING: the test is not parallel-friendly! 
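
(Context for this refactor, before the hunk continues below: the `Framework` helper centralizes exactly the boilerplate this patch deletes — client construction, namespace creation, and namespace teardown. A minimal, hedged sketch of what `NewFramework` plausibly looks like, reconstructed from the removed setup/teardown; `loadClient` and `createTestingNS` are the existing e2e helpers referenced above, everything else is illustrative, not the committed implementation:)

```go
package e2e

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Framework is a trimmed-down sketch of the e2e helper used in this patch:
// it owns the client and a per-spec namespace so individual specs stop
// wiring these up by hand in BeforeEach/AfterEach.
type Framework struct {
	BaseName  string
	Client    *client.Client
	Namespace *api.Namespace
}

// NewFramework registers setup and teardown with Ginkgo once, at Describe
// time, so every spec in the container gets a fresh client and namespace.
func NewFramework(baseName string) *Framework {
	f := &Framework{BaseName: baseName}

	BeforeEach(func() {
		c, err := loadClient()
		Expect(err).NotTo(HaveOccurred())
		f.Client = c

		ns, err := createTestingNS(f.BaseName, f.Client)
		Expect(err).NotTo(HaveOccurred())
		f.Namespace = ns
	})

	AfterEach(func() {
		// Teardown runs even when a spec fails, so namespaces never leak.
		Expect(f.Client.Namespaces().Delete(f.Namespace.Name)).To(Succeed())
		f.Namespace, f.Client = nil, nil
	})

	return f
}
```

One payoff visible in the rest of this diff: the provider check no longer needs to be duplicated in `AfterEach`, because the framework owns cleanup unconditionally.
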
@@ -253,7 +232,7 @@ var _ = Describe("Addon update", func() { //these tests are long, so I squeezed several cases in one scenario Expect(sshClient).NotTo(BeNil()) - dir = namespace.Name // we use it only to give a unique string for each test execution + dir = f.Namespace.Name // we use it only to give a unique string for each test execution temporaryRemotePathPrefix := "addon-test-dir" temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master @@ -270,10 +249,10 @@ var _ = Describe("Addon update", func() { var remoteFiles []stringPair = []stringPair{ {fmt.Sprintf(addon_controller_v1, defaultNsName), rcv1}, - {fmt.Sprintf(addon_controller_v2, namespace.Name), rcv2}, - {fmt.Sprintf(addon_service_v1, namespace.Name), svcv1}, - {fmt.Sprintf(addon_service_v2, namespace.Name), svcv2}, - {fmt.Sprintf(invalid_addon_controller_v1, namespace.Name), rcInvalid}, + {fmt.Sprintf(addon_controller_v2, f.Namespace.Name), rcv2}, + {fmt.Sprintf(addon_service_v1, f.Namespace.Name), svcv1}, + {fmt.Sprintf(addon_service_v2, f.Namespace.Name), svcv2}, + {fmt.Sprintf(invalid_addon_controller_v1, f.Namespace.Name), rcInvalid}, {fmt.Sprintf(invalid_addon_service_v1, defaultNsName), svcInvalid}, } @@ -302,8 +281,8 @@ var _ = Describe("Addon update", func() { sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1)) - waitForServiceInAddonTest(c, namespace.Name, "addon-test", true) - waitForReplicationControllerInAddonTest(c, defaultNsName, "addon-test-v1", true) + waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", true) + waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", true) By("update manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2)) @@ -316,27 +295,27 @@ var _ = Describe("Addon update", func() { * But it is ok - as long as we don't have rolling update, the result will be the same */ - waitForServiceInAddonTest(c, namespace.Name, "addon-test-updated", true) - waitForReplicationControllerInAddonTest(c, namespace.Name, "addon-test-v2", true) + waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", true) + waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", true) - waitForServiceInAddonTest(c, namespace.Name, "addon-test", false) - waitForReplicationControllerInAddonTest(c, defaultNsName, "addon-test-v1", false) + waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", false) + waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", false) By("remove manifests") sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2)) sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2)) - waitForServiceInAddonTest(c, namespace.Name, "addon-test-updated", false) - waitForReplicationControllerInAddonTest(c, namespace.Name, "addon-test-v2", false) + waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", false) + waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", false) By("verify invalid API addons weren't created") - _, err = c.ReplicationControllers(namespace.Name).Get("invalid-addon-test-v1") + _, err = f.Client.ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1") 
Expect(err).To(HaveOccurred()) - _, err = c.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1") + _, err = f.Client.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1") Expect(err).To(HaveOccurred()) - _, err = c.Services(namespace.Name).Get("ivalid-addon-test") + _, err = f.Client.Services(f.Namespace.Name).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) - _, err = c.Services(defaultNsName).Get("ivalid-addon-test") + _, err = f.Client.Services(defaultNsName).Get("ivalid-addon-test") Expect(err).To(HaveOccurred()) // invalid addons will be deleted by the deferred function From e1d76d5f19e84f87d4e7e51618a38ade9eda524c Mon Sep 17 00:00:00 2001 From: Gurvinder Singh Date: Thu, 30 Jul 2015 09:47:56 +0200 Subject: [PATCH 39/49] added test for spark driver too --- examples/examples_test.go | 1 + test/e2e/examples.go | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/examples_test.go b/examples/examples_test.go index bdad9c9b06e..d5e42f526a2 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -317,6 +317,7 @@ func TestExampleObjectSchemas(t *testing.T) { "spark-master-service": &api.Service{}, "spark-master": &api.Pod{}, "spark-worker-controller": &api.ReplicationController{}, + "spark-driver": &api.Pod{}, }, "../examples/storm": { "storm-nimbus-service": &api.Service{}, diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 75a8d6de6be..6c3d4ef3090 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -159,22 +159,26 @@ var _ = Describe("Examples e2e", func() { }) Describe("[Skipped][Example]Spark", func() { - It("should start spark master and workers", func() { + It("should start spark master, driver and workers", func() { mkpath := func(file string) string { return filepath.Join(testContext.RepoRoot, "examples", "spark", file) } serviceJson := mkpath("spark-master-service.json") masterJson := mkpath("spark-master.json") + driverJson := mkpath("spark-driver.json") workerControllerJson := mkpath("spark-worker-controller.json") nsFlag := fmt.Sprintf("--namespace=%v", ns) By("starting master") runKubectl("create", "-f", serviceJson, nsFlag) runKubectl("create", "-f", masterJson, nsFlag) + runKubectl("create", "-f", driverJson, nsFlag) err := waitForPodRunningInNamespace(c, "spark-master", ns) Expect(err).NotTo(HaveOccurred()) _, err = lookForStringInLog(ns, "spark-master", "spark-master", "Starting Spark master at", serverStartTimeout) Expect(err).NotTo(HaveOccurred()) + _, err = lookForStringInLog(ns, "spark-driver", "spark-driver", "Starting Spark driver at", serverStartTimeout) + Expect(err).NotTo(HaveOccurred()) By("starting workers") runKubectl("create", "-f", workerControllerJson, nsFlag) From 2c5c1931e53939fd3588a5d8643b491d015c89fb Mon Sep 17 00:00:00 2001 From: Wojciech Tyczynski Date: Thu, 30 Jul 2015 15:29:01 +0200 Subject: [PATCH 40/49] Revert "Implement 'Nodes Network' test for GKE" --- cluster/gke/util.sh | 5 +++-- hack/ginkgo-e2e.sh | 2 +- test/e2e/resize_nodes.go | 18 ++++++++++++++++-- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/cluster/gke/util.sh b/cluster/gke/util.sh index 1828324a5f7..b35d9ba53be 100755 --- a/cluster/gke/util.sh +++ b/cluster/gke/util.sh @@ -210,17 +210,18 @@ function get-password() { | grep password | cut -f 4 -d ' ') } -# Detect the IP for the master. Note that on GKE, we don't know the name of the -# master, so KUBE_MASTER is not set. 
+# Detect the instance name and IP for the master # # Assumed vars: # ZONE # CLUSTER_NAME # Vars set: +# KUBE_MASTER # KUBE_MASTER_IP function detect-master() { echo "... in gke:detect-master()" >&2 detect-project >&2 + KUBE_MASTER="k8s-${CLUSTER_NAME}-master" KUBE_MASTER_IP=$("${GCLOUD}" "${CMD_GROUP}" container clusters describe \ --project="${PROJECT}" --zone="${ZONE}" "${CLUSTER_NAME}" \ | grep endpoint | cut -f 2 -d ' ') diff --git a/hack/ginkgo-e2e.sh b/hack/ginkgo-e2e.sh index ce349e4b751..d7e1b608f86 100755 --- a/hack/ginkgo-e2e.sh +++ b/hack/ginkgo-e2e.sh @@ -89,7 +89,7 @@ fi export PATH=$(dirname "${e2e_test}"):"${PATH}" "${ginkgo}" "${ginkgo_args[@]:+${ginkgo_args[@]}}" "${e2e_test}" -- \ "${auth_config[@]:+${auth_config[@]}}" \ - --host="https://${KUBE_MASTER_IP:-}" \ + --host="https://${KUBE_MASTER_IP-}" \ --provider="${KUBERNETES_PROVIDER}" \ --gce-project="${PROJECT:-}" \ --gce-zone="${ZONE:-}" \ diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go index b5ec8a32637..1029b4722d5 100644 --- a/test/e2e/resize_nodes.go +++ b/test/e2e/resize_nodes.go @@ -326,8 +326,22 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica Failf("Couldn't get the external IP of host %s with addresses %v", node.Name, node.Status.Addresses) } By(fmt.Sprintf("block network traffic from node %s to the master", node.Name)) - iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump DROP", - strings.TrimPrefix(testContext.Host, "https://")) + + // TODO marekbiskup 2015-06-19 #10085 + // The use of MasterName will cause iptables to do a DNS lookup to + // resolve the name to an IP address, which will slow down the test + // and cause it to fail if DNS is absent or broken. + // Use the IP address instead. + + destination := testContext.CloudConfig.MasterName + if providerIs("aws") { + // This is the (internal) IP address used on AWS for the master + // TODO: Use IP address for all clouds? + // TODO: Avoid hard-coding this + destination = "172.20.0.9" + } + + iptablesRule := fmt.Sprintf("OUTPUT --destination %s --jump DROP", destination) defer func() { // This code will execute even if setting the iptables rule failed. // It is on purpose because we may have an error even if the new rule From cf011cad5577ca50e0a8a6a01103f9270c6606e2 Mon Sep 17 00:00:00 2001 From: Alex Robinson Date: Thu, 30 Jul 2015 16:28:47 +0000 Subject: [PATCH 41/49] Support passing a header to the manifest URL in the kubelet. Needed to support using GCE's v1 metadata API, which requires passing the header "Metadata-Flavor: Google". 
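
Go's `http.Get` convenience helper cannot attach request headers, so the change below has to build an explicit `http.Request` and send it through an `http.Client`. A self-contained sketch of that fetch path, extracted from the diff that follows (the metadata URL in `main` is a placeholder, not part of this patch; only the header name and value come from the commit message):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// fetchManifest GETs url with the given header attached, mirroring what the
// kubelet's sourceURL.extractFromURL does after this patch.
func fetchManifest(url string, header http.Header) ([]byte, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	// Replace the default (empty) header set with the caller's headers.
	req.Header = header

	resp, err := (&http.Client{}).Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("GET %s: %s", url, resp.Status)
	}
	return ioutil.ReadAll(resp.Body)
}

func main() {
	header := make(http.Header)
	header.Set("Metadata-Flavor", "Google") // required by GCE's v1 metadata API

	// Hypothetical manifest location, for illustration only.
	body, err := fetchManifest("http://metadata.google.internal/computeMetadata/v1/instance/attributes/manifest", header)
	if err != nil {
		fmt.Println("fetch failed:", err)
		return
	}
	fmt.Printf("read %d bytes of manifest\n", len(body))
}
```
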
--- cmd/kubelet/app/server.go | 17 +++++++++-- pkg/kubelet/config/http.go | 12 ++++++-- pkg/kubelet/config/http_test.go | 53 ++++++++++++++++++++++++++++++--- 3 files changed, 74 insertions(+), 8 deletions(-) diff --git a/cmd/kubelet/app/server.go b/cmd/kubelet/app/server.go index 71a48c91c52..9200cf8b1d3 100644 --- a/cmd/kubelet/app/server.go +++ b/cmd/kubelet/app/server.go @@ -69,6 +69,7 @@ type KubeletServer struct { FileCheckFrequency time.Duration HTTPCheckFrequency time.Duration ManifestURL string + ManifestURLHeader string EnableServer bool Address util.IP Port uint @@ -193,6 +194,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) { fs.DurationVar(&s.FileCheckFrequency, "file-check-frequency", s.FileCheckFrequency, "Duration between checking config files for new data") fs.DurationVar(&s.HTTPCheckFrequency, "http-check-frequency", s.HTTPCheckFrequency, "Duration between checking http for new data") fs.StringVar(&s.ManifestURL, "manifest-url", s.ManifestURL, "URL for accessing the container manifest") + fs.StringVar(&s.ManifestURLHeader, "manifest-url-header", s.ManifestURLHeader, "HTTP header to use when accessing the manifest URL, with the key separated from the value with a ':', as in 'key:value'") fs.BoolVar(&s.EnableServer, "enable-server", s.EnableServer, "Enable the Kubelet's server") fs.Var(&s.Address, "address", "The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces)") fs.UintVar(&s.Port, "port", s.Port, "The port for the Kubelet to serve on. Note that \"kubectl logs\" will not work if you set this flag.") // see #9325 @@ -295,6 +297,15 @@ func (s *KubeletServer) Run(_ []string) error { } glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile) + manifestURLHeader := make(http.Header) + if s.ManifestURLHeader != "" { + pieces := strings.Split(s.ManifestURLHeader, ":") + if len(pieces) != 2 { + return fmt.Errorf("manifest-url-header must have a single ':' key-value separator, got %q", s.ManifestURLHeader) + } + manifestURLHeader.Set(pieces[0], pieces[1]) + } + hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ",")) if err != nil { return err @@ -330,6 +341,7 @@ func (s *KubeletServer) Run(_ []string) error { RootDirectory: s.RootDirectory, ConfigFile: s.Config, ManifestURL: s.ManifestURL, + ManifestURLHeader: manifestURLHeader, FileCheckFrequency: s.FileCheckFrequency, HTTPCheckFrequency: s.HTTPCheckFrequency, PodInfraContainerImage: s.PodInfraContainerImage, @@ -660,8 +672,8 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig { // define url config source if kc.ManifestURL != "" { - glog.Infof("Adding manifest url: %v", kc.ManifestURL) - config.NewSourceURL(kc.ManifestURL, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource)) + glog.Infof("Adding manifest url %q with HTTP header %v", kc.ManifestURL, kc.ManifestURLHeader) + config.NewSourceURL(kc.ManifestURL, kc.ManifestURLHeader, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource)) } if kc.KubeClient != nil { glog.Infof("Watching apiserver") @@ -683,6 +695,7 @@ type KubeletConfig struct { RootDirectory string ConfigFile string ManifestURL string + ManifestURLHeader http.Header FileCheckFrequency time.Duration HTTPCheckFrequency time.Duration Hostname string diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go index de536ae3b74..8e2c0d96fc5 100644 --- a/pkg/kubelet/config/http.go +++ b/pkg/kubelet/config/http.go @@ -33,14 +33,16 
@@ import ( type sourceURL struct { url string + header http.Header nodeName string updates chan<- interface{} data []byte } -func NewSourceURL(url, nodeName string, period time.Duration, updates chan<- interface{}) { +func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) { config := &sourceURL{ url: url, + header: header, nodeName: nodeName, updates: updates, data: nil, @@ -60,7 +62,13 @@ func (s *sourceURL) applyDefaults(pod *api.Pod) error { } func (s *sourceURL) extractFromURL() error { - resp, err := http.Get(s.url) + req, err := http.NewRequest("GET", s.url, nil) + if err != nil { + return err + } + req.Header = s.header + client := &http.Client{} + resp, err := client.Do(req) if err != nil { return err } diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index ab3487993bf..3c71f310c9d 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -18,6 +18,7 @@ package config import ( "encoding/json" + "net/http" "net/http/httptest" "testing" "time" @@ -33,7 +34,7 @@ import ( func TestURLErrorNotExistNoUpdate(t *testing.T) { ch := make(chan interface{}) - NewSourceURL("http://localhost:49575/_not_found_", "localhost", time.Millisecond, ch) + NewSourceURL("http://localhost:49575/_not_found_", http.Header{}, "localhost", time.Millisecond, ch) select { case got := <-ch: t.Errorf("Expected no update, Got %#v", got) @@ -43,7 +44,7 @@ func TestURLErrorNotExistNoUpdate(t *testing.T) { func TestExtractFromHttpBadness(t *testing.T) { ch := make(chan interface{}, 1) - c := sourceURL{"http://localhost:49575/_not_found_", "other", ch, nil} + c := sourceURL{"http://localhost:49575/_not_found_", http.Header{}, "other", ch, nil} if err := c.extractFromURL(); err == nil { t.Errorf("Expected error") } @@ -112,7 +113,7 @@ func TestExtractInvalidPods(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() ch := make(chan interface{}, 1) - c := sourceURL{testServer.URL, "localhost", ch, nil} + c := sourceURL{testServer.URL, http.Header{}, "localhost", ch, nil} if err := c.extractFromURL(); err == nil { t.Errorf("%s: Expected error", testCase.desc) } @@ -259,7 +260,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() ch := make(chan interface{}, 1) - c := sourceURL{testServer.URL, hostname, ch, nil} + c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil} if err := c.extractFromURL(); err != nil { t.Errorf("%s: Unexpected error: %v", testCase.desc, err) continue @@ -276,3 +277,47 @@ func TestExtractPodsFromHTTP(t *testing.T) { } } } + +func TestURLWithHeader(t *testing.T) { + pod := &api.Pod{ + TypeMeta: api.TypeMeta{ + APIVersion: testapi.Version(), + Kind: "Pod", + }, + ObjectMeta: api.ObjectMeta{ + Name: "foo", + UID: "111", + Namespace: "mynamespace", + }, + Spec: api.PodSpec{ + NodeName: "localhost", + Containers: []api.Container{{Name: "1", Image: "foo", ImagePullPolicy: api.PullAlways}}, + }, + } + data, err := json.Marshal(pod) + if err != nil { + t.Fatalf("Unexpected json marshalling error: %v", err) + } + fakeHandler := util.FakeHandler{ + StatusCode: 200, + ResponseBody: string(data), + } + testServer := httptest.NewServer(&fakeHandler) + defer testServer.Close() + ch := make(chan interface{}, 1) + header := make(http.Header) + header.Set("Metadata-Flavor", "Google") + c := sourceURL{testServer.URL, header, "localhost", ch, nil} + if err := c.extractFromURL(); err != nil 
{ + t.Fatalf("Unexpected error extracting from URL: %v", err) + } + update := (<-ch).(kubelet.PodUpdate) + + headerVal := fakeHandler.RequestReceived.Header["Metadata-Flavor"] + if len(headerVal) != 1 || headerVal[0] != "Google" { + t.Errorf("Header missing expected entry %v. Got %v", header, fakeHandler.RequestReceived.Header) + } + if len(update.Pods) != 1 { + t.Errorf("Received wrong number of pods, expected one: %v", update.Pods) + } +} From c08ad6b7c1076e7a7f70aa9dd5774866bf59b406 Mon Sep 17 00:00:00 2001 From: Vishnu Kannan Date: Thu, 30 Jul 2015 10:45:26 -0700 Subject: [PATCH 42/49] Avoid mounting ssl volumes for heapster container. --- .../cluster-monitoring/google/heapster-controller.yaml | 8 -------- .../googleinfluxdb/heapster-controller-combined.yaml | 8 -------- 2 files changed, 16 deletions(-) diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 406cdfbcdd5..4c2a019bde5 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -33,11 +33,3 @@ spec: - --sink=gcl - --poll_duration=2m - --stats_resolution=1m - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs - readOnly: true - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs" diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index b9123f157e7..9932e6e2f38 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -33,11 +33,3 @@ spec: - --sink=influxdb:http://monitoring-influxdb:8086 - --poll_duration=2m - --stats_resolution=1m - volumeMounts: - - name: ssl-certs - mountPath: /etc/ssl/certs - readOnly: true - volumes: - - name: ssl-certs - hostPath: - path: "/etc/ssl/certs" From a9f0c4683f2bc7ce4377315694ef7122337d94a4 Mon Sep 17 00:00:00 2001 From: Alex Robinson Date: Thu, 30 Jul 2015 17:03:46 +0000 Subject: [PATCH 43/49] Limit the logging from kubelet attempting to read its manifest URL. Without this, it logs an error every 20 seconds if nothing is at the provided URL. --- pkg/kubelet/config/http.go | 25 +++++++++++++++++++------ pkg/kubelet/config/http_test.go | 8 ++++---- 2 files changed, 23 insertions(+), 10 deletions(-) diff --git a/pkg/kubelet/config/http.go b/pkg/kubelet/config/http.go index 8e2c0d96fc5..9467915b966 100644 --- a/pkg/kubelet/config/http.go +++ b/pkg/kubelet/config/http.go @@ -32,11 +32,12 @@ import ( ) type sourceURL struct { - url string - header http.Header - nodeName string - updates chan<- interface{} - data []byte + url string + header http.Header + nodeName string + updates chan<- interface{} + data []byte + failureLogs int } func NewSourceURL(url string, header http.Header, nodeName string, period time.Duration, updates chan<- interface{}) { @@ -53,7 +54,19 @@ func NewSourceURL(url string, header http.Header, nodeName string, period time.D func (s *sourceURL) run() { if err := s.extractFromURL(); err != nil { - glog.Errorf("Failed to read URL: %v", err) + // Don't log this multiple times per minute. The first few entries should be + // enough to get the point across. 
+ if s.failureLogs < 3 { + glog.Warningf("Failed to read pods from URL: %v", err) + } else if s.failureLogs == 3 { + glog.Warningf("Failed to read pods from URL. Won't log this message anymore: %v", err) + } + s.failureLogs++ + } else { + if s.failureLogs > 0 { + glog.Info("Successfully read pods from URL.") + s.failureLogs = 0 + } } } diff --git a/pkg/kubelet/config/http_test.go b/pkg/kubelet/config/http_test.go index 3c71f310c9d..69e3faaa146 100644 --- a/pkg/kubelet/config/http_test.go +++ b/pkg/kubelet/config/http_test.go @@ -44,7 +44,7 @@ func TestURLErrorNotExistNoUpdate(t *testing.T) { func TestExtractFromHttpBadness(t *testing.T) { ch := make(chan interface{}, 1) - c := sourceURL{"http://localhost:49575/_not_found_", http.Header{}, "other", ch, nil} + c := sourceURL{"http://localhost:49575/_not_found_", http.Header{}, "other", ch, nil, 0} if err := c.extractFromURL(); err == nil { t.Errorf("Expected error") } @@ -113,7 +113,7 @@ func TestExtractInvalidPods(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() ch := make(chan interface{}, 1) - c := sourceURL{testServer.URL, http.Header{}, "localhost", ch, nil} + c := sourceURL{testServer.URL, http.Header{}, "localhost", ch, nil, 0} if err := c.extractFromURL(); err == nil { t.Errorf("%s: Expected error", testCase.desc) } @@ -260,7 +260,7 @@ func TestExtractPodsFromHTTP(t *testing.T) { testServer := httptest.NewServer(&fakeHandler) defer testServer.Close() ch := make(chan interface{}, 1) - c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil} + c := sourceURL{testServer.URL, http.Header{}, hostname, ch, nil, 0} if err := c.extractFromURL(); err != nil { t.Errorf("%s: Unexpected error: %v", testCase.desc, err) continue @@ -307,7 +307,7 @@ func TestURLWithHeader(t *testing.T) { ch := make(chan interface{}, 1) header := make(http.Header) header.Set("Metadata-Flavor", "Google") - c := sourceURL{testServer.URL, header, "localhost", ch, nil} + c := sourceURL{testServer.URL, header, "localhost", ch, nil, 0} if err := c.extractFromURL(); err != nil { t.Fatalf("Unexpected error extracting from URL: %v", err) } From 94a387d5d1c3c39008cc31f4ccaee74316f34e0d Mon Sep 17 00:00:00 2001 From: Mike Danese Date: Thu, 30 Jul 2015 10:51:40 -0700 Subject: [PATCH 44/49] Revert "Improve conversion to support multiple packages" --- cmd/genconversion/conversion.go | 13 +- cmd/gendeepcopy/deep_copy.go | 13 +- hack/update-generated-conversions.sh | 8 ++ pkg/api/deep_copy_generated.go | 38 +++--- pkg/api/v1/conversion_generated.go | 55 ++++---- pkg/api/v1/deep_copy_generated.go | 36 ++--- pkg/runtime/conversion_generator.go | 177 ++++--------------------- pkg/runtime/deep_copy_generator.go | 188 ++++++--------------------- 8 files changed, 139 insertions(+), 389 deletions(-) diff --git a/cmd/genconversion/conversion.go b/cmd/genconversion/conversion.go index c60b8f1dacd..c25fb0d0aad 100644 --- a/cmd/genconversion/conversion.go +++ b/cmd/genconversion/conversion.go @@ -17,16 +17,13 @@ limitations under the License. 
package main import ( - "fmt" "io" "os" - "path" "runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -53,9 +50,7 @@ func main() { funcOut = file } - generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", *version)) - apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") - generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource") + generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw()) // TODO(wojtek-t): Change the overwrites to a flag. generator.OverwritePackage(*version, "") for _, knownType := range api.Scheme.KnownTypes(*version) { @@ -63,14 +58,10 @@ func main() { glog.Errorf("error while generating conversion functions for %v: %v", knownType, err) } } - generator.RepackImports(util.NewStringSet()) - if err := generator.WriteImports(funcOut); err != nil { - glog.Fatalf("error while writing imports: %v", err) - } if err := generator.WriteConversionFunctions(funcOut); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } - if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil { + if err := generator.RegisterConversionFunctions(funcOut); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } } diff --git a/cmd/gendeepcopy/deep_copy.go b/cmd/gendeepcopy/deep_copy.go index dcb1c33766d..59ac43bb72f 100644 --- a/cmd/gendeepcopy/deep_copy.go +++ b/cmd/gendeepcopy/deep_copy.go @@ -19,14 +19,12 @@ package main import ( "io" "os" - "path" "runtime" "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" - "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -55,14 +53,10 @@ func main() { } knownVersion := *version - registerTo := "api.Scheme" if knownVersion == "api" { knownVersion = api.Scheme.Raw().InternalVersion - registerTo = "Scheme" } - pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion) - generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath, util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes")) - generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") + generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw()) for _, overwrite := range strings.Split(*overwrites, ",") { vals := strings.Split(overwrite, "=") @@ -73,14 +67,13 @@ func main() { glog.Errorf("error while generating deep copy functions for %v: %v", knownType, err) } } - generator.RepackImports() - if err := generator.WriteImports(funcOut); err != nil { + if err := generator.WriteImports(funcOut, *version); err != nil { glog.Fatalf("error while writing imports: %v", err) } if err := generator.WriteDeepCopyFunctions(funcOut); err != nil { glog.Fatalf("error while writing deep copy functions: %v", err) } - if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil { + if err := generator.RegisterDeepCopyFunctions(funcOut, *version); err != nil { glog.Fatalf("error while registering deep copy functions: %v", err) } } diff --git 
a/hack/update-generated-conversions.sh b/hack/update-generated-conversions.sh index 7df31ca18f8..d7b9f9fd591 100755 --- a/hack/update-generated-conversions.sh +++ b/hack/update-generated-conversions.sh @@ -33,6 +33,14 @@ function generate_version() { cat >> $TMPFILE < 0 { - name = dirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { - name = subdirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - } - } - for i := 2; i < 100; i++ { - generatedName := fmt.Sprintf("%s%d", name, i) - if _, ok := g.shortImports[generatedName]; !ok { - g.imports[pkg] = generatedName - g.shortImports[generatedName] = pkg - return generatedName - } - } - panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) -} - func (g *conversionGenerator) typeName(inType reflect.Type) string { switch inType.Kind() { + case reflect.Map: + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) - case reflect.Map: - if len(inType.Name()) == 0 { - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) - } - fallthrough default: - pkg, name := inType.PkgPath(), inType.Name() - if len(name) == 0 && inType.Kind() == reflect.Struct { - return "struct{}" - } - if len(pkg) == 0 { + typeWithPkg := fmt.Sprintf("%s", inType) + slices := strings.Split(typeWithPkg, ".") + if len(slices) == 1 { // Default package. - return name + return slices[0] } - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val + if len(slices) == 2 { + pkg := slices[0] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val + } + if pkg != "" { + pkg = pkg + "." + } + return pkg + slices[1] } - if len(pkg) == 0 { - return name - } - short := g.addImportByPath(pkg) - if len(short) > 0 { - return fmt.Sprintf("%s.%s", short, name) - } - return name + panic("Incorrect type name: " + typeWithPkg) } } @@ -785,10 +658,6 @@ func (g *conversionGenerator) existsDedicatedConversionFunction(inType, outType // unnamed. Thus we return false here. return false } - // TODO: no way to handle private conversions in different packages - if g.assumePrivateConversions { - return false - } return g.scheme.Converter().HasConversionFunc(inType, outType) } diff --git a/pkg/runtime/deep_copy_generator.go b/pkg/runtime/deep_copy_generator.go index 20d931caa0a..7be7af6bd86 100644 --- a/pkg/runtime/deep_copy_generator.go +++ b/pkg/runtime/deep_copy_generator.go @@ -19,7 +19,6 @@ package runtime import ( "fmt" "io" - "path" "reflect" "sort" "strings" @@ -39,20 +38,9 @@ type DeepCopyGenerator interface { // functions for this type and all nested types will be generated. AddType(inType reflect.Type) error - // ReplaceType registers a type that should be used instead of the type - // with the provided pkgPath and name. - ReplaceType(pkgPath, name string, in interface{}) - - // AddImport registers a package name with the generator and returns its - // short name. - AddImport(pkgPath string) string - - // RepackImports creates a stable ordering of import short names - RepackImports() - // Writes all imports that are necessary for deep-copy function and // their registration. 
- WriteImports(w io.Writer) error + WriteImports(w io.Writer, pkg string) error // Writes deel-copy functions for all types added via AddType() method // and their nested types. @@ -69,80 +57,20 @@ type DeepCopyGenerator interface { OverwritePackage(pkg, overwrite string) } -func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include util.StringSet) DeepCopyGenerator { - g := &deepCopyGenerator{ +func NewDeepCopyGenerator(scheme *conversion.Scheme) DeepCopyGenerator { + return &deepCopyGenerator{ scheme: scheme, - targetPkg: targetPkg, copyables: make(map[reflect.Type]bool), - imports: make(map[string]string), - shortImports: make(map[string]string), + imports: util.StringSet{}, pkgOverwrites: make(map[string]string), - replace: make(map[pkgPathNamePair]reflect.Type), - include: include, } - g.targetPackage(targetPkg) - g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") - return g -} - -type pkgPathNamePair struct { - PkgPath string - Name string } type deepCopyGenerator struct { - scheme *conversion.Scheme - targetPkg string - copyables map[reflect.Type]bool - // map of package names to shortname - imports map[string]string - // map of short names to package names - shortImports map[string]string + scheme *conversion.Scheme + copyables map[reflect.Type]bool + imports util.StringSet pkgOverwrites map[string]string - replace map[pkgPathNamePair]reflect.Type - include util.StringSet -} - -func (g *deepCopyGenerator) addImportByPath(pkg string) string { - if name, ok := g.imports[pkg]; ok { - return name - } - name := path.Base(pkg) - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 { - name = dirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { - name = subdirname + name - if _, ok := g.shortImports[name]; !ok { - g.imports[pkg] = name - g.shortImports[name] = pkg - return name - } - } - } - for i := 2; i < 100; i++ { - generatedName := fmt.Sprintf("%s%d", name, i) - if _, ok := g.shortImports[generatedName]; !ok { - g.imports[pkg] = generatedName - g.shortImports[generatedName] = pkg - return generatedName - } - } - panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) -} - -func (g *deepCopyGenerator) targetPackage(pkg string) { - g.imports[pkg] = "" - g.shortImports[""] = pkg } func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { @@ -162,18 +90,11 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return err } case reflect.Interface: - g.addImportByPath(inType.PkgPath()) + g.imports.Insert(inType.PkgPath()) return nil case reflect.Struct: - g.addImportByPath(inType.PkgPath()) - found := false - for s := range g.include { - if strings.HasPrefix(inType.PkgPath(), s) { - found = true - break - } - } - if !found { + g.imports.Insert(inType.PkgPath()) + if !strings.HasPrefix(inType.PkgPath(), "github.com/GoogleCloudPlatform/kubernetes") { return nil } for i := 0; i < inType.NumField(); i++ { @@ -189,15 +110,6 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return nil } -func (g *deepCopyGenerator) AddImport(pkg string) string { - return g.addImportByPath(pkg) -} - -// ReplaceType registers a replacement type to be used instead of the named type -func (g 
*deepCopyGenerator) ReplaceType(pkgPath, name string, t interface{}) { - g.replace[pkgPathNamePair{pkgPath, name}] = reflect.TypeOf(t) -} - func (g *deepCopyGenerator) AddType(inType reflect.Type) error { if inType.Kind() != reflect.Struct { return fmt.Errorf("non-struct copies are not supported") @@ -205,23 +117,10 @@ func (g *deepCopyGenerator) AddType(inType reflect.Type) error { return g.addAllRecursiveTypes(inType) } -func (g *deepCopyGenerator) RepackImports() { - var packages []string - for key := range g.imports { - packages = append(packages, key) - } - sort.Strings(packages) - g.imports = make(map[string]string) - g.shortImports = make(map[string]string) - - g.targetPackage(g.targetPkg) - for _, pkg := range packages { - g.addImportByPath(pkg) - } -} - -func (g *deepCopyGenerator) WriteImports(w io.Writer) error { +func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { var packages []string + packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/api") + packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") for key := range g.imports { packages = append(packages, key) } @@ -231,13 +130,10 @@ func (g *deepCopyGenerator) WriteImports(w io.Writer) error { indent := 0 buffer.addLine("import (\n", indent) for _, importPkg := range packages { - if len(importPkg) == 0 { + if strings.HasSuffix(importPkg, pkg) { continue } - if len(g.imports[importPkg]) == 0 { - continue - } - buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1) + buffer.addLine(fmt.Sprintf("\"%s\"\n", importPkg), indent+1) } buffer.addLine(")\n", indent) buffer.addLine("\n", indent) @@ -263,47 +159,35 @@ func (s byPkgAndName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (g *deepCopyGenerator) nameForType(inType reflect.Type) string { +func (g *deepCopyGenerator) typeName(inType reflect.Type) string { switch inType.Kind() { + case reflect.Map: + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) - case reflect.Map: - if len(inType.Name()) == 0 { - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) - } - fallthrough default: - pkg, name := inType.PkgPath(), inType.Name() - if len(name) == 0 && inType.Kind() == reflect.Struct { - return "struct{}" - } - if len(pkg) == 0 { + typeWithPkg := fmt.Sprintf("%s", inType) + slices := strings.Split(typeWithPkg, ".") + if len(slices) == 1 { // Default package. - return name + return slices[0] } - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val + if len(slices) == 2 { + pkg := slices[0] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val + } + if pkg != "" { + pkg = pkg + "." 
+ } + return pkg + slices[1] } - if len(pkg) == 0 { - return name - } - short := g.addImportByPath(pkg) - if len(short) > 0 { - return fmt.Sprintf("%s.%s", short, name) - } - return name + panic("Incorrect type name: " + typeWithPkg) } } -func (g *deepCopyGenerator) typeName(inType reflect.Type) string { - if t, ok := g.replace[pkgPathNamePair{inType.PkgPath(), inType.Name()}]; ok { - return g.nameForType(t) - } - return g.nameForType(inType) -} - func (g *deepCopyGenerator) deepCopyFunctionName(inType reflect.Type) string { funcNameFormat := "deepCopy_%s_%s" inPkg := packageForName(inType) @@ -558,8 +442,12 @@ func (g *deepCopyGenerator) writeDeepCopyForType(b *buffer, inType reflect.Type, func (g *deepCopyGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) { b.addLine("func init() {\n", indent) - registerFormat := "err := %s.AddGeneratedDeepCopyFuncs(\n" - b.addLine(fmt.Sprintf(registerFormat, pkg), indent+1) + registerFormat := "err := %sScheme.AddGeneratedDeepCopyFuncs(\n" + if pkg == "api" { + b.addLine(fmt.Sprintf(registerFormat, ""), indent+1) + } else { + b.addLine(fmt.Sprintf(registerFormat, "api."), indent+1) + } } func (g *deepCopyGenerator) writeRegisterFooter(b *buffer, indent int) { From 55f574c2678deb87670f0280e2d6123dcde46ce9 Mon Sep 17 00:00:00 2001 From: deads2k Date: Mon, 29 Jun 2015 14:39:48 -0400 Subject: [PATCH 45/49] switch kubeconfig types to internal map[string]*struct --- pkg/client/clientcmd/api/helpers.go | 6 +- pkg/client/clientcmd/api/helpers_test.go | 6 +- pkg/client/clientcmd/api/types.go | 32 +++--- pkg/client/clientcmd/api/types_test.go | 14 +-- pkg/client/clientcmd/api/v1/conversion.go | 40 +++---- pkg/client/clientcmd/client_config_test.go | 18 ++-- pkg/client/clientcmd/loader.go | 4 +- pkg/client/clientcmd/loader_test.go | 36 +++---- pkg/client/clientcmd/validation.go | 12 +-- pkg/client/clientcmd/validation_test.go | 48 ++++----- pkg/kubectl/cmd/config/config_test.go | 102 ++++++++---------- pkg/kubectl/cmd/config/create_authinfo.go | 8 +- pkg/kubectl/cmd/config/create_cluster.go | 8 +- pkg/kubectl/cmd/config/create_context.go | 8 +- .../cmd/config/navigation_step_parser.go | 6 +- .../cmd/config/navigation_step_parser_test.go | 5 +- pkg/kubectl/cmd/config/set.go | 18 +--- 17 files changed, 181 insertions(+), 190 deletions(-) diff --git a/pkg/client/clientcmd/api/helpers.go b/pkg/client/clientcmd/api/helpers.go index 5ea762a3c57..e2d56570c6e 100644 --- a/pkg/client/clientcmd/api/helpers.go +++ b/pkg/client/clientcmd/api/helpers.go @@ -43,10 +43,10 @@ func MinifyConfig(config *Config) error { return fmt.Errorf("cannot locate context %v", config.CurrentContext) } - newContexts := map[string]Context{} + newContexts := map[string]*Context{} newContexts[config.CurrentContext] = currContext - newClusters := map[string]Cluster{} + newClusters := map[string]*Cluster{} if len(currContext.Cluster) > 0 { if _, exists := config.Clusters[currContext.Cluster]; !exists { return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) @@ -55,7 +55,7 @@ func MinifyConfig(config *Config) error { newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster] } - newAuthInfos := map[string]AuthInfo{} + newAuthInfos := map[string]*AuthInfo{} if len(currContext.AuthInfo) > 0 { if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) diff --git a/pkg/client/clientcmd/api/helpers_test.go b/pkg/client/clientcmd/api/helpers_test.go index aa62ab1ec40..ca970f3b4ca 100644 --- 
a/pkg/client/clientcmd/api/helpers_test.go +++ b/pkg/client/clientcmd/api/helpers_test.go @@ -38,13 +38,13 @@ func newMergedConfig(certFile, certContent, keyFile, keyContent, caFile, caConte } return Config{ - AuthInfos: map[string]AuthInfo{ + AuthInfos: map[string]*AuthInfo{ "red-user": {Token: "red-token", ClientCertificateData: []byte(certContent), ClientKeyData: []byte(keyContent)}, "blue-user": {Token: "blue-token", ClientCertificate: certFile, ClientKey: keyFile}}, - Clusters: map[string]Cluster{ + Clusters: map[string]*Cluster{ "cow-cluster": {Server: "http://cow.org:8080", CertificateAuthorityData: []byte(caContent)}, "chicken-cluster": {Server: "http://chicken.org:8080", CertificateAuthority: caFile}}, - Contexts: map[string]Context{ + Contexts: map[string]*Context{ "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"}, "shaker-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster"}}, CurrentContext: "federal-context", diff --git a/pkg/client/clientcmd/api/types.go b/pkg/client/clientcmd/api/types.go index fb18e0f1169..434bfa2b23b 100644 --- a/pkg/client/clientcmd/api/types.go +++ b/pkg/client/clientcmd/api/types.go @@ -33,21 +33,21 @@ type Config struct { // Preferences holds general information to be use for cli interactions Preferences Preferences `json:"preferences"` // Clusters is a map of referencable names to cluster configs - Clusters map[string]Cluster `json:"clusters"` + Clusters map[string]*Cluster `json:"clusters"` // AuthInfos is a map of referencable names to user configs - AuthInfos map[string]AuthInfo `json:"users"` + AuthInfos map[string]*AuthInfo `json:"users"` // Contexts is a map of referencable names to context configs - Contexts map[string]Context `json:"contexts"` + Contexts map[string]*Context `json:"contexts"` // CurrentContext is the name of the context that you would like to use by default CurrentContext string `json:"current-context"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"` + Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"` } type Preferences struct { Colors bool `json:"colors,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"` + Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"` } // Cluster contains information about how to communicate with a kubernetes cluster @@ -65,7 +65,7 @@ type Cluster struct { // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"` + Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"` } // AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. @@ -87,7 +87,7 @@ type AuthInfo struct { // Password is the password for basic authentication to the kubernetes cluster. Password string `json:"password,omitempty"` // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"` + Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"` } // Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) @@ -101,36 +101,36 @@ type Context struct { // Namespace is the default namespace to use on unspecified requests Namespace string `json:"namespace,omitempty"` // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"` + Extensions map[string]*runtime.EmbeddedObject `json:"extensions,omitempty"` } // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewConfig() *Config { return &Config{ Preferences: *NewPreferences(), - Clusters: make(map[string]Cluster), - AuthInfos: make(map[string]AuthInfo), - Contexts: make(map[string]Context), - Extensions: make(map[string]runtime.EmbeddedObject), + Clusters: make(map[string]*Cluster), + AuthInfos: make(map[string]*AuthInfo), + Contexts: make(map[string]*Context), + Extensions: make(map[string]*runtime.EmbeddedObject), } } // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewContext() *Context { - return &Context{Extensions: make(map[string]runtime.EmbeddedObject)} + return &Context{Extensions: make(map[string]*runtime.EmbeddedObject)} } // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewCluster() *Cluster { - return &Cluster{Extensions: make(map[string]runtime.EmbeddedObject)} + return &Cluster{Extensions: make(map[string]*runtime.EmbeddedObject)} } // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewAuthInfo() *AuthInfo { - return &AuthInfo{Extensions: make(map[string]runtime.EmbeddedObject)} + return &AuthInfo{Extensions: make(map[string]*runtime.EmbeddedObject)} } // NewConfig is a convenience function that returns a new Config object with non-nil maps func NewPreferences() *Preferences { - return &Preferences{Extensions: make(map[string]runtime.EmbeddedObject)} + return &Preferences{Extensions: make(map[string]*runtime.EmbeddedObject)} } diff --git a/pkg/client/clientcmd/api/types_test.go b/pkg/client/clientcmd/api/types_test.go index 3caf18fae62..af351bf2f42 100644 --- a/pkg/client/clientcmd/api/types_test.go +++ b/pkg/client/clientcmd/api/types_test.go @@ -42,35 +42,35 @@ func ExampleEmptyConfig() { func ExampleOfOptionsConfig() { defaultConfig := NewConfig() defaultConfig.Preferences.Colors = true - defaultConfig.Clusters["alfa"] = Cluster{ + defaultConfig.Clusters["alfa"] = &Cluster{ Server: "https://alfa.org:8080", APIVersion: "v1beta2", InsecureSkipTLSVerify: true, CertificateAuthority: "path/to/my/cert-ca-filename", } - defaultConfig.Clusters["bravo"] = Cluster{ + defaultConfig.Clusters["bravo"] = &Cluster{ Server: "https://bravo.org:8080", APIVersion: "v1beta1", InsecureSkipTLSVerify: false, } - defaultConfig.AuthInfos["white-mage-via-cert"] = AuthInfo{ + defaultConfig.AuthInfos["white-mage-via-cert"] = &AuthInfo{ ClientCertificate: "path/to/my/client-cert-filename", ClientKey: "path/to/my/client-key-filename", } - defaultConfig.AuthInfos["red-mage-via-token"] = AuthInfo{ + 
defaultConfig.AuthInfos["red-mage-via-token"] = &AuthInfo{ Token: "my-secret-token", } - defaultConfig.Contexts["bravo-as-black-mage"] = Context{ + defaultConfig.Contexts["bravo-as-black-mage"] = &Context{ Cluster: "bravo", AuthInfo: "black-mage-via-file", Namespace: "yankee", } - defaultConfig.Contexts["alfa-as-black-mage"] = Context{ + defaultConfig.Contexts["alfa-as-black-mage"] = &Context{ Cluster: "alfa", AuthInfo: "black-mage-via-file", Namespace: "zulu", } - defaultConfig.Contexts["alfa-as-white-mage"] = Context{ + defaultConfig.Contexts["alfa-as-white-mage"] = &Context{ Cluster: "alfa", AuthInfo: "white-mage-via-cert", } diff --git a/pkg/client/clientcmd/api/v1/conversion.go b/pkg/client/clientcmd/api/v1/conversion.go index b88793c676f..5d7746ed5a3 100644 --- a/pkg/client/clientcmd/api/v1/conversion.go +++ b/pkg/client/clientcmd/api/v1/conversion.go @@ -57,19 +57,19 @@ func init() { return err } - out.Clusters = make(map[string]api.Cluster) + out.Clusters = make(map[string]*api.Cluster) if err := s.Convert(&in.Clusters, &out.Clusters, 0); err != nil { return err } - out.AuthInfos = make(map[string]api.AuthInfo) + out.AuthInfos = make(map[string]*api.AuthInfo) if err := s.Convert(&in.AuthInfos, &out.AuthInfos, 0); err != nil { return err } - out.Contexts = make(map[string]api.Context) + out.Contexts = make(map[string]*api.Context) if err := s.Convert(&in.Contexts, &out.Contexts, 0); err != nil { return err } - out.Extensions = make(map[string]runtime.EmbeddedObject) + out.Extensions = make(map[string]*runtime.EmbeddedObject) if err := s.Convert(&in.Extensions, &out.Extensions, 0); err != nil { return err } @@ -99,18 +99,18 @@ func init() { } return nil }, - func(in *[]NamedCluster, out *map[string]api.Cluster, s conversion.Scope) error { + func(in *[]NamedCluster, out *map[string]*api.Cluster, s conversion.Scope) error { for _, curr := range *in { newCluster := api.NewCluster() if err := s.Convert(&curr.Cluster, newCluster, 0); err != nil { return err } - (*out)[curr.Name] = *newCluster + (*out)[curr.Name] = newCluster } return nil }, - func(in *map[string]api.Cluster, out *[]NamedCluster, s conversion.Scope) error { + func(in *map[string]*api.Cluster, out *[]NamedCluster, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) @@ -120,7 +120,7 @@ func init() { for _, key := range allKeys { newCluster := (*in)[key] oldCluster := &Cluster{} - if err := s.Convert(&newCluster, oldCluster, 0); err != nil { + if err := s.Convert(newCluster, oldCluster, 0); err != nil { return err } @@ -130,18 +130,18 @@ func init() { return nil }, - func(in *[]NamedAuthInfo, out *map[string]api.AuthInfo, s conversion.Scope) error { + func(in *[]NamedAuthInfo, out *map[string]*api.AuthInfo, s conversion.Scope) error { for _, curr := range *in { newAuthInfo := api.NewAuthInfo() if err := s.Convert(&curr.AuthInfo, newAuthInfo, 0); err != nil { return err } - (*out)[curr.Name] = *newAuthInfo + (*out)[curr.Name] = newAuthInfo } return nil }, - func(in *map[string]api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { + func(in *map[string]*api.AuthInfo, out *[]NamedAuthInfo, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) @@ -151,7 +151,7 @@ func init() { for _, key := range allKeys { newAuthInfo := (*in)[key] oldAuthInfo := &AuthInfo{} - if err := s.Convert(&newAuthInfo, oldAuthInfo, 0); err != nil { + if err := s.Convert(newAuthInfo, oldAuthInfo, 0); err != nil { 
return err } @@ -161,18 +161,18 @@ func init() { return nil }, - func(in *[]NamedContext, out *map[string]api.Context, s conversion.Scope) error { + func(in *[]NamedContext, out *map[string]*api.Context, s conversion.Scope) error { for _, curr := range *in { newContext := api.NewContext() if err := s.Convert(&curr.Context, newContext, 0); err != nil { return err } - (*out)[curr.Name] = *newContext + (*out)[curr.Name] = newContext } return nil }, - func(in *map[string]api.Context, out *[]NamedContext, s conversion.Scope) error { + func(in *map[string]*api.Context, out *[]NamedContext, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) @@ -182,7 +182,7 @@ func init() { for _, key := range allKeys { newContext := (*in)[key] oldContext := &Context{} - if err := s.Convert(&newContext, oldContext, 0); err != nil { + if err := s.Convert(newContext, oldContext, 0); err != nil { return err } @@ -192,18 +192,18 @@ func init() { return nil }, - func(in *[]NamedExtension, out *map[string]runtime.EmbeddedObject, s conversion.Scope) error { + func(in *[]NamedExtension, out *map[string]*runtime.EmbeddedObject, s conversion.Scope) error { for _, curr := range *in { newExtension := &runtime.EmbeddedObject{} if err := s.Convert(&curr.Extension, newExtension, 0); err != nil { return err } - (*out)[curr.Name] = *newExtension + (*out)[curr.Name] = newExtension } return nil }, - func(in *map[string]runtime.EmbeddedObject, out *[]NamedExtension, s conversion.Scope) error { + func(in *map[string]*runtime.EmbeddedObject, out *[]NamedExtension, s conversion.Scope) error { allKeys := make([]string, 0, len(*in)) for key := range *in { allKeys = append(allKeys, key) @@ -213,7 +213,7 @@ func init() { for _, key := range allKeys { newExtension := (*in)[key] oldExtension := &runtime.RawExtension{} - if err := s.Convert(&newExtension, oldExtension, 0); err != nil { + if err := s.Convert(newExtension, oldExtension, 0); err != nil { return err } diff --git a/pkg/client/clientcmd/client_config_test.go b/pkg/client/clientcmd/client_config_test.go index e55bfa3b111..ed6dbdd85f3 100644 --- a/pkg/client/clientcmd/client_config_test.go +++ b/pkg/client/clientcmd/client_config_test.go @@ -32,14 +32,14 @@ func createValidTestConfig() *clientcmdapi.Config { ) config := clientcmdapi.NewConfig() - config.Clusters["clean"] = clientcmdapi.Cluster{ + config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: server, APIVersion: latest.Version, } - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ Token: token, } - config.Contexts["clean"] = clientcmdapi.Context{ + config.Contexts["clean"] = &clientcmdapi.Context{ Cluster: "clean", AuthInfo: "clean", } @@ -87,16 +87,16 @@ func TestCertificateData(t *testing.T) { keyData := []byte("key-data") config := clientcmdapi.NewConfig() - config.Clusters["clean"] = clientcmdapi.Cluster{ + config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: "https://localhost:8443", APIVersion: latest.Version, CertificateAuthorityData: caData, } - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ ClientCertificateData: certData, ClientKeyData: keyData, } - config.Contexts["clean"] = clientcmdapi.Context{ + config.Contexts["clean"] = &clientcmdapi.Context{ Cluster: "clean", AuthInfo: "clean", } @@ -120,15 +120,15 @@ func TestBasicAuthData(t *testing.T) { password := "mypass" config := clientcmdapi.NewConfig() - config.Clusters["clean"] 
= clientcmdapi.Cluster{ + config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: "https://localhost:8443", APIVersion: latest.Version, } - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ Username: username, Password: password, } - config.Contexts["clean"] = clientcmdapi.Context{ + config.Contexts["clean"] = &clientcmdapi.Context{ Cluster: "clean", AuthInfo: "clean", } diff --git a/pkg/client/clientcmd/loader.go b/pkg/client/clientcmd/loader.go index 7410e5b82cd..4422b69deed 100644 --- a/pkg/client/clientcmd/loader.go +++ b/pkg/client/clientcmd/loader.go @@ -226,14 +226,14 @@ func ResolveLocalPaths(filename string, config *clientcmdapi.Config) error { return fmt.Errorf("Could not determine the absolute path of config file %s: %v", filename, err) } - resolvedClusters := make(map[string]clientcmdapi.Cluster) + resolvedClusters := make(map[string]*clientcmdapi.Cluster) for key, cluster := range config.Clusters { cluster.CertificateAuthority = resolveLocalPath(configDir, cluster.CertificateAuthority) resolvedClusters[key] = cluster } config.Clusters = resolvedClusters - resolvedAuthInfos := make(map[string]clientcmdapi.AuthInfo) + resolvedAuthInfos := make(map[string]*clientcmdapi.AuthInfo) for key, authInfo := range config.AuthInfos { authInfo.ClientCertificate = resolveLocalPath(configDir, authInfo.ClientCertificate) authInfo.ClientKey = resolveLocalPath(configDir, authInfo.ClientKey) diff --git a/pkg/client/clientcmd/loader_test.go b/pkg/client/clientcmd/loader_test.go index 27c897d8a77..b011cc8d4a8 100644 --- a/pkg/client/clientcmd/loader_test.go +++ b/pkg/client/clientcmd/loader_test.go @@ -34,43 +34,43 @@ import ( var ( testConfigAlfa = clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "red-user": {Token: "red-token"}}, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "cow-cluster": {Server: "http://cow.org:8080"}}, - Contexts: map[string]clientcmdapi.Context{ + Contexts: map[string]*clientcmdapi.Context{ "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster", Namespace: "hammer-ns"}}, } testConfigBravo = clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "black-user": {Token: "black-token"}}, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "pig-cluster": {Server: "http://pig.org:8080"}}, - Contexts: map[string]clientcmdapi.Context{ + Contexts: map[string]*clientcmdapi.Context{ "queen-anne-context": {AuthInfo: "black-user", Cluster: "pig-cluster", Namespace: "saw-ns"}}, } testConfigCharlie = clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "green-user": {Token: "green-token"}}, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "horse-cluster": {Server: "http://horse.org:8080"}}, - Contexts: map[string]clientcmdapi.Context{ + Contexts: map[string]*clientcmdapi.Context{ "shaker-context": {AuthInfo: "green-user", Cluster: "horse-cluster", Namespace: "chisel-ns"}}, } testConfigDelta = clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "blue-user": {Token: "blue-token"}}, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "chicken-cluster": {Server: "http://chicken.org:8080"}}, - Contexts: 
map[string]clientcmdapi.Context{ + Contexts: map[string]*clientcmdapi.Context{ "gothic-context": {AuthInfo: "blue-user", Cluster: "chicken-cluster", Namespace: "plane-ns"}}, } testConfigConflictAlfa = clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "red-user": {Token: "a-different-red-token"}, "yellow-user": {Token: "yellow-token"}}, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "cow-cluster": {Server: "http://a-different-cow.org:8080", InsecureSkipTLSVerify: true}, "donkey-cluster": {Server: "http://donkey.org:8080", InsecureSkipTLSVerify: true}}, CurrentContext: "federal-context", @@ -176,21 +176,21 @@ func TestConflictingCurrentContext(t *testing.T) { func TestResolveRelativePaths(t *testing.T) { pathResolutionConfig1 := clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "relative-user-1": {ClientCertificate: "relative/client/cert", ClientKey: "../relative/client/key"}, "absolute-user-1": {ClientCertificate: "/absolute/client/cert", ClientKey: "/absolute/client/key"}, }, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "relative-server-1": {CertificateAuthority: "../relative/ca"}, "absolute-server-1": {CertificateAuthority: "/absolute/ca"}, }, } pathResolutionConfig2 := clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "relative-user-2": {ClientCertificate: "relative/client/cert2", ClientKey: "../relative/client/key2"}, "absolute-user-2": {ClientCertificate: "/absolute/client/cert2", ClientKey: "/absolute/client/key2"}, }, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "relative-server-2": {CertificateAuthority: "../relative/ca2"}, "absolute-server-2": {CertificateAuthority: "/absolute/ca2"}, }, diff --git a/pkg/client/clientcmd/validation.go b/pkg/client/clientcmd/validation.go index 31e927ccbf2..92538759750 100644 --- a/pkg/client/clientcmd/validation.go +++ b/pkg/client/clientcmd/validation.go @@ -95,15 +95,15 @@ func Validate(config clientcmdapi.Config) error { } for contextName, context := range config.Contexts { - validationErrors = append(validationErrors, validateContext(contextName, context, config)...) + validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) } for authInfoName, authInfo := range config.AuthInfos { - validationErrors = append(validationErrors, validateAuthInfo(authInfoName, authInfo)...) + validationErrors = append(validationErrors, validateAuthInfo(authInfoName, *authInfo)...) } for clusterName, clusterInfo := range config.Clusters { - validationErrors = append(validationErrors, validateClusterInfo(clusterName, clusterInfo)...) + validationErrors = append(validationErrors, validateClusterInfo(clusterName, *clusterInfo)...) } return newErrConfigurationInvalid(validationErrors) @@ -131,9 +131,9 @@ func ConfirmUsable(config clientcmdapi.Config, passedContextName string) error { } if exists { - validationErrors = append(validationErrors, validateContext(contextName, context, config)...) - validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, config.AuthInfos[context.AuthInfo])...) - validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, config.Clusters[context.Cluster])...) 
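The validation.go hunks here follow from the kubeconfig maps changing from struct values (`map[string]clientcmdapi.Context` and friends) to pointer values, which is why the helpers now receive dereferenced copies (`*context`, `*clusterInfo`). A minimal standalone sketch, not part of the patch, of the Go rule that motivates the switch: map entries are not addressable, so a field of a struct held by value cannot be assigned in place, while a pointer entry can.

```go
package main

import "fmt"

// AuthInfo stands in for the clientcmdapi struct; only the shape matters here.
type AuthInfo struct{ Token string }

func main() {
	byValue := map[string]AuthInfo{"red-user": {Token: "red-token"}}
	// byValue["red-user"].Token = "new" // does not compile: map entries are not addressable

	// Holding structs by value forces a copy-out, mutate, write-back dance:
	tmp := byValue["red-user"]
	tmp.Token = "new"
	byValue["red-user"] = tmp

	byPointer := map[string]*AuthInfo{"red-user": {Token: "red-token"}}
	byPointer["red-user"].Token = "new" // with pointer values this mutates in place

	fmt.Println(byValue["red-user"].Token, byPointer["red-user"].Token)
}
```

The same rule explains the test simplifications below, where `TestSetIntoExistingStruct` collapses its copy-out/write-back sequence into a single field assignment.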
+ validationErrors = append(validationErrors, validateContext(contextName, *context, config)...) + validationErrors = append(validationErrors, validateAuthInfo(context.AuthInfo, *config.AuthInfos[context.AuthInfo])...) + validationErrors = append(validationErrors, validateClusterInfo(context.Cluster, *config.Clusters[context.Cluster])...) } return newErrConfigurationInvalid(validationErrors) diff --git a/pkg/client/clientcmd/validation_test.go b/pkg/client/clientcmd/validation_test.go index f93aa03d737..c058dc67aaa 100644 --- a/pkg/client/clientcmd/validation_test.go +++ b/pkg/client/clientcmd/validation_test.go @@ -28,25 +28,25 @@ import ( func TestConfirmUsableBadInfoButOkConfig(t *testing.T) { config := clientcmdapi.NewConfig() - config.Clusters["missing ca"] = clientcmdapi.Cluster{ + config.Clusters["missing ca"] = &clientcmdapi.Cluster{ Server: "anything", CertificateAuthority: "missing", } - config.AuthInfos["error"] = clientcmdapi.AuthInfo{ + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ Username: "anything", Token: "here", } - config.Contexts["dirty"] = clientcmdapi.Context{ + config.Contexts["dirty"] = &clientcmdapi.Context{ Cluster: "missing ca", AuthInfo: "error", } - config.Clusters["clean"] = clientcmdapi.Cluster{ + config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: "anything", } - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ Token: "here", } - config.Contexts["clean"] = clientcmdapi.Context{ + config.Contexts["clean"] = &clientcmdapi.Context{ Cluster: "clean", AuthInfo: "clean", } @@ -64,15 +64,15 @@ func TestConfirmUsableBadInfoButOkConfig(t *testing.T) { } func TestConfirmUsableBadInfoConfig(t *testing.T) { config := clientcmdapi.NewConfig() - config.Clusters["missing ca"] = clientcmdapi.Cluster{ + config.Clusters["missing ca"] = &clientcmdapi.Cluster{ Server: "anything", CertificateAuthority: "missing", } - config.AuthInfos["error"] = clientcmdapi.AuthInfo{ + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ Username: "anything", Token: "here", } - config.Contexts["first"] = clientcmdapi.Context{ + config.Contexts["first"] = &clientcmdapi.Context{ Cluster: "missing ca", AuthInfo: "error", } @@ -150,7 +150,7 @@ func TestIsConfigurationInvalid(t *testing.T) { func TestValidateMissingReferencesConfig(t *testing.T) { config := clientcmdapi.NewConfig() config.CurrentContext = "anything" - config.Contexts["anything"] = clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"} + config.Contexts["anything"] = &clientcmdapi.Context{Cluster: "missing", AuthInfo: "missing"} test := configValidationTest{ config: config, expectedErrorSubstring: []string{"user \"missing\" was not found for context \"anything\"", "cluster \"missing\" was not found for context \"anything\""}, @@ -162,7 +162,7 @@ func TestValidateMissingReferencesConfig(t *testing.T) { func TestValidateEmptyContext(t *testing.T) { config := clientcmdapi.NewConfig() config.CurrentContext = "anything" - config.Contexts["anything"] = clientcmdapi.Context{} + config.Contexts["anything"] = &clientcmdapi.Context{} test := configValidationTest{ config: config, expectedErrorSubstring: []string{"user was not specified for context \"anything\"", "cluster was not specified for context \"anything\""}, @@ -174,7 +174,7 @@ func TestValidateEmptyContext(t *testing.T) { func TestValidateEmptyClusterInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.Clusters["empty"] = clientcmdapi.Cluster{} + config.Clusters["empty"] = &clientcmdapi.Cluster{} test 
:= configValidationTest{ config: config, expectedErrorSubstring: []string{"no server found for"}, @@ -185,7 +185,7 @@ func TestValidateEmptyClusterInfo(t *testing.T) { } func TestValidateMissingCAFileClusterInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.Clusters["missing ca"] = clientcmdapi.Cluster{ + config.Clusters["missing ca"] = &clientcmdapi.Cluster{ Server: "anything", CertificateAuthority: "missing", } @@ -199,7 +199,7 @@ func TestValidateMissingCAFileClusterInfo(t *testing.T) { } func TestValidateCleanClusterInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.Clusters["clean"] = clientcmdapi.Cluster{ + config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: "anything", } test := configValidationTest{ @@ -214,7 +214,7 @@ func TestValidateCleanWithCAClusterInfo(t *testing.T) { defer os.Remove(tempFile.Name()) config := clientcmdapi.NewConfig() - config.Clusters["clean"] = clientcmdapi.Cluster{ + config.Clusters["clean"] = &clientcmdapi.Cluster{ Server: "anything", CertificateAuthority: tempFile.Name(), } @@ -228,7 +228,7 @@ func TestValidateCleanWithCAClusterInfo(t *testing.T) { func TestValidateEmptyAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.AuthInfos["error"] = clientcmdapi.AuthInfo{} + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{} test := configValidationTest{ config: config, } @@ -238,7 +238,7 @@ func TestValidateEmptyAuthInfo(t *testing.T) { } func TestValidateCertFilesNotFoundAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.AuthInfos["error"] = clientcmdapi.AuthInfo{ + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ ClientCertificate: "missing", ClientKey: "missing", } @@ -255,7 +255,7 @@ func TestValidateCertDataOverridesFiles(t *testing.T) { defer os.Remove(tempFile.Name()) config := clientcmdapi.NewConfig() - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ ClientCertificate: tempFile.Name(), ClientCertificateData: []byte("certdata"), ClientKey: tempFile.Name(), @@ -274,7 +274,7 @@ func TestValidateCleanCertFilesAuthInfo(t *testing.T) { defer os.Remove(tempFile.Name()) config := clientcmdapi.NewConfig() - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ ClientCertificate: tempFile.Name(), ClientKey: tempFile.Name(), } @@ -287,7 +287,7 @@ func TestValidateCleanCertFilesAuthInfo(t *testing.T) { } func TestValidateCleanTokenAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.AuthInfos["clean"] = clientcmdapi.AuthInfo{ + config.AuthInfos["clean"] = &clientcmdapi.AuthInfo{ Token: "any-value", } test := configValidationTest{ @@ -300,7 +300,7 @@ func TestValidateCleanTokenAuthInfo(t *testing.T) { func TestValidateMultipleMethodsAuthInfo(t *testing.T) { config := clientcmdapi.NewConfig() - config.AuthInfos["error"] = clientcmdapi.AuthInfo{ + config.AuthInfos["error"] = &clientcmdapi.AuthInfo{ Token: "token", Username: "username", } @@ -319,7 +319,7 @@ type configValidationTest struct { } func (c configValidationTest) testContext(contextName string, t *testing.T) { - errs := validateContext(contextName, c.config.Contexts[contextName], *c.config) + errs := validateContext(contextName, *c.config.Contexts[contextName], *c.config) if len(c.expectedErrorSubstring) != 0 { if len(errs) == 0 { @@ -379,7 +379,7 @@ func (c configValidationTest) testConfig(t *testing.T) { } } func (c configValidationTest) testCluster(clusterName string, t *testing.T) { - errs := 
validateClusterInfo(clusterName, c.config.Clusters[clusterName]) + errs := validateClusterInfo(clusterName, *c.config.Clusters[clusterName]) if len(c.expectedErrorSubstring) != 0 { if len(errs) == 0 { @@ -399,7 +399,7 @@ func (c configValidationTest) testCluster(clusterName string, t *testing.T) { } func (c configValidationTest) testAuthInfo(authInfoName string, t *testing.T) { - errs := validateAuthInfo(authInfoName, c.config.AuthInfos[authInfoName]) + errs := validateAuthInfo(authInfoName, *c.config.AuthInfos[authInfoName]) if len(c.expectedErrorSubstring) != 0 { if len(errs) == 0 { diff --git a/pkg/kubectl/cmd/config/config_test.go b/pkg/kubectl/cmd/config/config_test.go index 8940ffb025b..1bafbf0e4a3 100644 --- a/pkg/kubectl/cmd/config/config_test.go +++ b/pkg/kubectl/cmd/config/config_test.go @@ -34,11 +34,11 @@ import ( func newRedFederalCowHammerConfig() clientcmdapi.Config { return clientcmdapi.Config{ - AuthInfos: map[string]clientcmdapi.AuthInfo{ + AuthInfos: map[string]*clientcmdapi.AuthInfo{ "red-user": {Token: "red-token"}}, - Clusters: map[string]clientcmdapi.Cluster{ + Clusters: map[string]*clientcmdapi.Cluster{ "cow-cluster": {Server: "http://cow.org:8080"}}, - Contexts: map[string]clientcmdapi.Context{ + Contexts: map[string]*clientcmdapi.Context{ "federal-context": {AuthInfo: "red-user", Cluster: "cow-cluster"}}, CurrentContext: "federal-context", } @@ -108,10 +108,7 @@ func TestSetNonExistantContext(t *testing.T) { func TestSetIntoExistingStruct(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() - a := expectedConfig.AuthInfos["red-user"] - authInfo := &a - authInfo.Password = "new-path-value" - expectedConfig.AuthInfos["red-user"] = *authInfo + expectedConfig.AuthInfos["red-user"].Password = "new-path-value" test := configCommandTest{ args: []string{"set", "users.red-user.password", "new-path-value"}, startingConfig: newRedFederalCowHammerConfig(), @@ -123,10 +120,7 @@ func TestSetIntoExistingStruct(t *testing.T) { func TestSetWithPathPrefixIntoExistingStruct(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() - cc := expectedConfig.Clusters["cow-clusters"] - cinfo := &cc - cinfo.Server = "http://cow.org:8080/foo/baz" - expectedConfig.Clusters["cow-cluster"] = *cinfo + expectedConfig.Clusters["cow-cluster"].Server = "http://cow.org:8080/foo/baz" test := configCommandTest{ args: []string{"set", "clusters.cow-cluster.server", "http://cow.org:8080/foo/baz"}, startingConfig: newRedFederalCowHammerConfig(), @@ -164,7 +158,7 @@ func TestUnsetStruct(t *testing.T) { func TestUnsetField(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.AuthInfos["red-user"] = *clientcmdapi.NewAuthInfo() + expectedConfig.AuthInfos["red-user"] = clientcmdapi.NewAuthInfo() test := configCommandTest{ args: []string{"unset", "users.red-user.token"}, startingConfig: newRedFederalCowHammerConfig(), @@ -178,7 +172,7 @@ func TestSetIntoNewStruct(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() cluster := clientcmdapi.NewCluster() cluster.Server = "new-server-value" - expectedConfig.Clusters["big-cluster"] = *cluster + expectedConfig.Clusters["big-cluster"] = cluster test := configCommandTest{ args: []string{"set", "clusters.big-cluster.server", "new-server-value"}, startingConfig: newRedFederalCowHammerConfig(), @@ -192,7 +186,7 @@ func TestSetBoolean(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() cluster := clientcmdapi.NewCluster() cluster.InsecureSkipTLSVerify = true - expectedConfig.Clusters["big-cluster"] = 
*cluster + expectedConfig.Clusters["big-cluster"] = cluster test := configCommandTest{ args: []string{"set", "clusters.big-cluster.insecure-skip-tls-verify", "true"}, startingConfig: newRedFederalCowHammerConfig(), @@ -206,7 +200,7 @@ func TestSetIntoNewConfig(t *testing.T) { expectedConfig := *clientcmdapi.NewConfig() context := clientcmdapi.NewContext() context.AuthInfo = "fake-user" - expectedConfig.Contexts["new-context"] = *context + expectedConfig.Contexts["new-context"] = context test := configCommandTest{ args: []string{"set", "contexts.new-context.user", "fake-user"}, startingConfig: *clientcmdapi.NewConfig(), @@ -218,7 +212,7 @@ func TestSetIntoNewConfig(t *testing.T) { func TestNewEmptyAuth(t *testing.T) { expectedConfig := *clientcmdapi.NewConfig() - expectedConfig.AuthInfos["the-user-name"] = *clientcmdapi.NewAuthInfo() + expectedConfig.AuthInfos["the-user-name"] = clientcmdapi.NewAuthInfo() test := configCommandTest{ args: []string{"set-credentials", "the-user-name"}, startingConfig: *clientcmdapi.NewConfig(), @@ -232,7 +226,7 @@ func TestAdditionalAuth(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() authInfo := clientcmdapi.NewAuthInfo() authInfo.Token = "token" - expectedConfig.AuthInfos["another-user"] = *authInfo + expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"}, startingConfig: newRedFederalCowHammerConfig(), @@ -250,7 +244,7 @@ func TestEmbedClientCert(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() authInfo := clientcmdapi.NewAuthInfo() authInfo.ClientCertificateData = fakeData - expectedConfig.AuthInfos["another-user"] = *authInfo + expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=" + fakeCertFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"}, @@ -269,7 +263,7 @@ func TestEmbedClientKey(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() authInfo := clientcmdapi.NewAuthInfo() authInfo.ClientKeyData = fakeData - expectedConfig.AuthInfos["another-user"] = *authInfo + expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagKeyFile + "=" + fakeKeyFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"}, @@ -296,7 +290,7 @@ func TestEmptyTokenAndCertAllowed(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() authInfo := clientcmdapi.NewAuthInfo() authInfo.ClientCertificate = "cert-file" - expectedConfig.AuthInfos["another-user"] = *authInfo + expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert-file", "--" + clientcmd.FlagBearerToken + "="}, @@ -312,7 +306,7 @@ func TestTokenAndCertAllowed(t *testing.T) { authInfo := clientcmdapi.NewAuthInfo() authInfo.Token = "token" authInfo.ClientCertificate = "cert-file" - expectedConfig.AuthInfos["another-user"] = *authInfo + expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert-file", "--" + clientcmd.FlagBearerToken + "=token"}, startingConfig: newRedFederalCowHammerConfig(), @@ -343,10 +337,10 @@ func TestBasicClearsToken(t *testing.T) { authInfoWithBasic.Password = "mypass" startingConfig := 
newRedFederalCowHammerConfig() - startingConfig.AuthInfos["another-user"] = *authInfoWithToken + startingConfig.AuthInfos["another-user"] = authInfoWithToken expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.AuthInfos["another-user"] = *authInfoWithBasic + expectedConfig.AuthInfos["another-user"] = authInfoWithBasic test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagUsername + "=myuser", "--" + clientcmd.FlagPassword + "=mypass"}, @@ -366,10 +360,10 @@ func TestTokenClearsBasic(t *testing.T) { authInfoWithToken.Token = "token" startingConfig := newRedFederalCowHammerConfig() - startingConfig.AuthInfos["another-user"] = *authInfoWithBasic + startingConfig.AuthInfos["another-user"] = authInfoWithBasic expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.AuthInfos["another-user"] = *authInfoWithToken + expectedConfig.AuthInfos["another-user"] = authInfoWithToken test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"}, @@ -395,10 +389,10 @@ func TestTokenLeavesCert(t *testing.T) { authInfoWithTokenAndCerts.ClientKeyData = []byte("keydata") startingConfig := newRedFederalCowHammerConfig() - startingConfig.AuthInfos["another-user"] = *authInfoWithCerts + startingConfig.AuthInfos["another-user"] = authInfoWithCerts expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.AuthInfos["another-user"] = *authInfoWithTokenAndCerts + expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagBearerToken + "=token"}, @@ -419,10 +413,10 @@ func TestCertLeavesToken(t *testing.T) { authInfoWithTokenAndCerts.ClientKey = "key" startingConfig := newRedFederalCowHammerConfig() - startingConfig.AuthInfos["another-user"] = *authInfoWithToken + startingConfig.AuthInfos["another-user"] = authInfoWithToken expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.AuthInfos["another-user"] = *authInfoWithTokenAndCerts + expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts test := configCommandTest{ args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert", "--" + clientcmd.FlagKeyFile + "=key"}, @@ -441,10 +435,10 @@ func TestCAClearsInsecure(t *testing.T) { clusterInfoWithCA.CertificateAuthority = "cafile" startingConfig := newRedFederalCowHammerConfig() - startingConfig.Clusters["another-cluster"] = *clusterInfoWithInsecure + startingConfig.Clusters["another-cluster"] = clusterInfoWithInsecure expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.Clusters["another-cluster"] = *clusterInfoWithCA + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA test := configCommandTest{ args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=cafile"}, @@ -463,10 +457,10 @@ func TestCAClearsCAData(t *testing.T) { clusterInfoWithCA.CertificateAuthority = "cafile" startingConfig := newRedFederalCowHammerConfig() - startingConfig.Clusters["another-cluster"] = *clusterInfoWithCAData + startingConfig.Clusters["another-cluster"] = clusterInfoWithCAData expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.Clusters["another-cluster"] = *clusterInfoWithCA + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA test := configCommandTest{ args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=cafile", "--" + clientcmd.FlagInsecure + 
"=false"}, @@ -486,10 +480,10 @@ func TestInsecureClearsCA(t *testing.T) { clusterInfoWithCA.CertificateAuthorityData = []byte("cadata") startingConfig := newRedFederalCowHammerConfig() - startingConfig.Clusters["another-cluster"] = *clusterInfoWithCA + startingConfig.Clusters["another-cluster"] = clusterInfoWithCA expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.Clusters["another-cluster"] = *clusterInfoWithInsecure + expectedConfig.Clusters["another-cluster"] = clusterInfoWithInsecure test := configCommandTest{ args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagInsecure + "=true"}, @@ -513,10 +507,10 @@ func TestCADataClearsCA(t *testing.T) { clusterInfoWithCA.CertificateAuthority = "cafile" startingConfig := newRedFederalCowHammerConfig() - startingConfig.Clusters["another-cluster"] = *clusterInfoWithCA + startingConfig.Clusters["another-cluster"] = clusterInfoWithCA expectedConfig := newRedFederalCowHammerConfig() - expectedConfig.Clusters["another-cluster"] = *clusterInfoWithCAData + expectedConfig.Clusters["another-cluster"] = clusterInfoWithCAData test := configCommandTest{ args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=" + fakeCAFile.Name(), "--" + clientcmd.FlagEmbedCerts + "=true"}, @@ -566,7 +560,7 @@ func TestMergeExistingAuth(t *testing.T) { func TestNewEmptyCluster(t *testing.T) { expectedConfig := *clientcmdapi.NewConfig() - expectedConfig.Clusters["new-cluster"] = *clientcmdapi.NewCluster() + expectedConfig.Clusters["new-cluster"] = clientcmdapi.NewCluster() test := configCommandTest{ args: []string{"set-cluster", "new-cluster"}, startingConfig: *clientcmdapi.NewConfig(), @@ -578,7 +572,7 @@ func TestNewEmptyCluster(t *testing.T) { func TestAdditionalCluster(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() - cluster := *clientcmdapi.NewCluster() + cluster := clientcmdapi.NewCluster() cluster.APIVersion = testapi.Version() cluster.CertificateAuthority = "ca-location" cluster.InsecureSkipTLSVerify = false @@ -595,7 +589,7 @@ func TestAdditionalCluster(t *testing.T) { func TestOverwriteExistingCluster(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() - cluster := *clientcmdapi.NewCluster() + cluster := clientcmdapi.NewCluster() cluster.Server = "serverlocation" expectedConfig.Clusters["cow-cluster"] = cluster @@ -610,7 +604,7 @@ func TestOverwriteExistingCluster(t *testing.T) { func TestNewEmptyContext(t *testing.T) { expectedConfig := *clientcmdapi.NewConfig() - expectedConfig.Contexts["new-context"] = *clientcmdapi.NewContext() + expectedConfig.Contexts["new-context"] = clientcmdapi.NewContext() test := configCommandTest{ args: []string{"set-context", "new-context"}, startingConfig: *clientcmdapi.NewConfig(), @@ -622,7 +616,7 @@ func TestNewEmptyContext(t *testing.T) { func TestAdditionalContext(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() - context := *clientcmdapi.NewContext() + context := clientcmdapi.NewContext() context.Cluster = "some-cluster" context.AuthInfo = "some-user" context.Namespace = "different-namespace" @@ -683,10 +677,13 @@ func TestToBool(t *testing.T) { } -func testConfigCommand(args []string, startingConfig clientcmdapi.Config) (string, clientcmdapi.Config) { +func testConfigCommand(args []string, startingConfig clientcmdapi.Config, t *testing.T) (string, clientcmdapi.Config) { fakeKubeFile, _ := ioutil.TempFile("", "") defer os.Remove(fakeKubeFile.Name()) - clientcmd.WriteToFile(startingConfig, fakeKubeFile.Name()) + err := 
clientcmd.WriteToFile(startingConfig, fakeKubeFile.Name()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } argsToUse := make([]string, 0, 2+len(args)) argsToUse = append(argsToUse, "--kubeconfig="+fakeKubeFile.Name()) @@ -712,7 +709,7 @@ type configCommandTest struct { } func (test configCommandTest) run(t *testing.T) string { - out, actualConfig := testConfigCommand(test.args, test.startingConfig) + out, actualConfig := testConfigCommand(test.args, test.startingConfig, t) testSetNilMapsToEmpties(reflect.ValueOf(&test.expectedConfig)) testSetNilMapsToEmpties(reflect.ValueOf(&actualConfig)) @@ -755,20 +752,7 @@ func testSetNilMapsToEmpties(curr reflect.Value) { case reflect.Map: for _, mapKey := range actualCurrValue.MapKeys() { currMapValue := actualCurrValue.MapIndex(mapKey) - - // our maps do not hold pointers to structs, they hold the structs themselves. This means that MapIndex returns the struct itself - // That in turn means that they have kinds of type.Struct, which is not a settable type. Because of this, we need to make new struct of that type - // copy all the data from the old value into the new value, then take the .addr of the new value to modify it in the next recursion. - // clear as mud - modifiableMapValue := reflect.New(currMapValue.Type()).Elem() - modifiableMapValue.Set(currMapValue) - - if modifiableMapValue.Kind() == reflect.Struct { - modifiableMapValue = modifiableMapValue.Addr() - } - - testSetNilMapsToEmpties(modifiableMapValue) - actualCurrValue.SetMapIndex(mapKey, reflect.Indirect(modifiableMapValue)) + testSetNilMapsToEmpties(currMapValue) } case reflect.Struct: diff --git a/pkg/kubectl/cmd/config/create_authinfo.go b/pkg/kubectl/cmd/config/create_authinfo.go index 9e157d543bc..e5276c29fc0 100644 --- a/pkg/kubectl/cmd/config/create_authinfo.go +++ b/pkg/kubectl/cmd/config/create_authinfo.go @@ -108,8 +108,12 @@ func (o createAuthInfoOptions) run() error { return err } - authInfo := o.modifyAuthInfo(config.AuthInfos[o.name]) - config.AuthInfos[o.name] = authInfo + startingStanza, exists := config.AuthInfos[o.name] + if !exists { + startingStanza = clientcmdapi.NewAuthInfo() + } + authInfo := o.modifyAuthInfo(*startingStanza) + config.AuthInfos[o.name] = &authInfo if err := ModifyConfig(o.configAccess, *config); err != nil { return err diff --git a/pkg/kubectl/cmd/config/create_cluster.go b/pkg/kubectl/cmd/config/create_cluster.go index cd19a3e913a..2a2750e18db 100644 --- a/pkg/kubectl/cmd/config/create_cluster.go +++ b/pkg/kubectl/cmd/config/create_cluster.go @@ -94,8 +94,12 @@ func (o createClusterOptions) run() error { return err } - cluster := o.modifyCluster(config.Clusters[o.name]) - config.Clusters[o.name] = cluster + startingStanza, exists := config.Clusters[o.name] + if !exists { + startingStanza = clientcmdapi.NewCluster() + } + cluster := o.modifyCluster(*startingStanza) + config.Clusters[o.name] = &cluster if err := ModifyConfig(o.configAccess, *config); err != nil { return err diff --git a/pkg/kubectl/cmd/config/create_context.go b/pkg/kubectl/cmd/config/create_context.go index bd8ff3642af..d4b93dd9c25 100644 --- a/pkg/kubectl/cmd/config/create_context.go +++ b/pkg/kubectl/cmd/config/create_context.go @@ -81,8 +81,12 @@ func (o createContextOptions) run() error { return err } - context := o.modifyContext(config.Contexts[o.name]) - config.Contexts[o.name] = context + startingStanza, exists := config.Contexts[o.name] + if !exists { + startingStanza = clientcmdapi.NewContext() + } + context := o.modifyContext(*startingStanza) + 
config.Contexts[o.name] = &context if err := ModifyConfig(o.configAccess, *config); err != nil { return err diff --git a/pkg/kubectl/cmd/config/navigation_step_parser.go b/pkg/kubectl/cmd/config/navigation_step_parser.go index 4b5d1ca96f0..1e0bca9ed5a 100644 --- a/pkg/kubectl/cmd/config/navigation_step_parser.go +++ b/pkg/kubectl/cmd/config/navigation_step_parser.go @@ -50,7 +50,7 @@ func newNavigationSteps(path string) (*navigationSteps, error) { // store them as a single step. In order to do that, we need to determine what set of tokens is a legal step AFTER the name of the map key // This set of reflective code pulls the type of the map values, uses that type to look up the set of legal tags. Those legal tags are used to // walk the list of remaining parts until we find a match to a legal tag or the end of the string. That name is used to burn all the used parts. - mapValueType := currType.Elem() + mapValueType := currType.Elem().Elem() mapValueOptions, err := getPotentialTypeValues(mapValueType) if err != nil { return nil, err @@ -120,6 +120,10 @@ func findNameStep(parts []string, typeOptions util.StringSet) string { // getPotentialTypeValues takes a type and looks up the tags used to represent its fields when serialized. func getPotentialTypeValues(typeValue reflect.Type) (map[string]reflect.Type, error) { + if typeValue.Kind() == reflect.Ptr { + typeValue = typeValue.Elem() + } + if typeValue.Kind() != reflect.Struct { return nil, fmt.Errorf("%v is not of type struct", typeValue) } diff --git a/pkg/kubectl/cmd/config/navigation_step_parser_test.go b/pkg/kubectl/cmd/config/navigation_step_parser_test.go index 6d112386513..4e4a11b0e79 100644 --- a/pkg/kubectl/cmd/config/navigation_step_parser_test.go +++ b/pkg/kubectl/cmd/config/navigation_step_parser_test.go @@ -36,7 +36,7 @@ func TestParseWithDots(t *testing.T) { path: "clusters.my.dot.delimited.name.server", expectedNavigationSteps: navigationSteps{ steps: []navigationStep{ - {"clusters", reflect.TypeOf(make(map[string]clientcmdapi.Cluster))}, + {"clusters", reflect.TypeOf(make(map[string]*clientcmdapi.Cluster))}, {"my.dot.delimited.name", reflect.TypeOf(clientcmdapi.Cluster{})}, {"server", reflect.TypeOf("")}, }, @@ -51,7 +51,7 @@ func TestParseWithDotsEndingWithName(t *testing.T) { path: "contexts.10.12.12.12", expectedNavigationSteps: navigationSteps{ steps: []navigationStep{ - {"contexts", reflect.TypeOf(make(map[string]clientcmdapi.Context))}, + {"contexts", reflect.TypeOf(make(map[string]*clientcmdapi.Context))}, {"10.12.12.12", reflect.TypeOf(clientcmdapi.Context{})}, }, }, @@ -91,5 +91,6 @@ func (test stepParserTest) run(t *testing.T) { if !reflect.DeepEqual(test.expectedNavigationSteps, *actualSteps) { t.Errorf("diff: %v", util.ObjectDiff(test.expectedNavigationSteps, *actualSteps)) + t.Errorf("expected: %#v\n actual: %#v", test.expectedNavigationSteps, *actualSteps) } } diff --git a/pkg/kubectl/cmd/config/set.go b/pkg/kubectl/cmd/config/set.go index 38e397d52bd..8f68b9cfde8 100644 --- a/pkg/kubectl/cmd/config/set.go +++ b/pkg/kubectl/cmd/config/set.go @@ -139,26 +139,15 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri needToSetNewMapValue := currMapValue.Kind() == reflect.Invalid if needToSetNewMapValue { - currMapValue = reflect.New(mapValueType).Elem() + currMapValue = reflect.New(mapValueType.Elem()).Elem().Addr() actualCurrValue.SetMapIndex(mapKey, currMapValue) } - // our maps do not hold pointers to structs, they hold the structs themselves. 
This means that MapIndex returns the struct itself - // That in turn means that they have kinds of type.Struct, which is not a settable type. Because of this, we need to make new struct of that type - // copy all the data from the old value into the new value, then take the .addr of the new value to modify it in the next recursion. - // clear as mud - modifiableMapValue := reflect.New(currMapValue.Type()).Elem() - modifiableMapValue.Set(currMapValue) - - if modifiableMapValue.Kind() == reflect.Struct { - modifiableMapValue = modifiableMapValue.Addr() - } - err := modifyConfig(modifiableMapValue, steps, propertyValue, unset) + err := modifyConfig(currMapValue, steps, propertyValue, unset) if err != nil { return err } - actualCurrValue.SetMapIndex(mapKey, reflect.Indirect(modifiableMapValue)) return nil case reflect.String: @@ -213,5 +202,6 @@ func modifyConfig(curr reflect.Value, steps *navigationSteps, propertyValue stri } - return fmt.Errorf("Unrecognized type: %v", actualCurrValue) + panic(fmt.Errorf("Unrecognized type: %v", actualCurrValue)) + return nil } From c5ef83b29bc18e7c911a8a9b355e774df99d322a Mon Sep 17 00:00:00 2001 From: deads2k Date: Mon, 29 Jun 2015 16:27:31 -0400 Subject: [PATCH 46/49] relativize paths in kubeconfig files --- pkg/client/clientcmd/loader.go | 214 ++++++++++++++++------ pkg/kubectl/cmd/config/config.go | 23 ++- pkg/kubectl/cmd/config/config_test.go | 40 ++-- pkg/kubectl/cmd/config/create_authinfo.go | 5 +- pkg/kubectl/cmd/config/create_cluster.go | 4 +- pkg/kubectl/cmd/config/create_context.go | 2 +- pkg/kubectl/cmd/config/set.go | 2 +- pkg/kubectl/cmd/config/unset.go | 2 +- pkg/kubectl/cmd/config/use_context.go | 2 +- 9 files changed, 215 insertions(+), 79 deletions(-) diff --git a/pkg/client/clientcmd/loader.go b/pkg/client/clientcmd/loader.go index 4422b69deed..596433f6d30 100644 --- a/pkg/client/clientcmd/loader.go +++ b/pkg/client/clientcmd/loader.go @@ -23,6 +23,7 @@ import ( "os" "path" "path/filepath" + "strings" "github.com/ghodss/yaml" "github.com/imdario/mergo" @@ -120,11 +121,6 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { if err := mergeConfigWithFile(mapConfig, file); err != nil { errlist = append(errlist, err) } - if rules.ResolvePaths() { - if err := ResolveLocalPaths(file, mapConfig); err != nil { - errlist = append(errlist, err) - } - } } // merge all of the struct values in the reverse order so that priority is given correctly @@ -133,9 +129,6 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { for i := len(kubeConfigFiles) - 1; i >= 0; i-- { file := kubeConfigFiles[i] mergeConfigWithFile(nonMapConfig, file) - if rules.ResolvePaths() { - ResolveLocalPaths(file, nonMapConfig) - } } // since values are overwritten, but maps values are not, we can merge the non-map config on top of the map config and @@ -144,6 +137,12 @@ func (rules *ClientConfigLoadingRules) Load() (*clientcmdapi.Config, error) { mergo.Merge(config, mapConfig) mergo.Merge(config, nonMapConfig) + if rules.ResolvePaths() { + if err := ResolveLocalPaths(config); err != nil { + errlist = append(errlist, err) + } + } + return config, errors.NewAggregate(errlist) } @@ -213,49 +212,6 @@ func mergeConfigWithFile(startingConfig *clientcmdapi.Config, filename string) e return nil } -// ResolveLocalPaths resolves all relative paths in the config object with respect to the parent directory of the filename -// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file 
without -// modification of its contents. -func ResolveLocalPaths(filename string, config *clientcmdapi.Config) error { - if len(filename) == 0 { - return nil - } - - configDir, err := filepath.Abs(filepath.Dir(filename)) - if err != nil { - return fmt.Errorf("Could not determine the absolute path of config file %s: %v", filename, err) - } - - resolvedClusters := make(map[string]*clientcmdapi.Cluster) - for key, cluster := range config.Clusters { - cluster.CertificateAuthority = resolveLocalPath(configDir, cluster.CertificateAuthority) - resolvedClusters[key] = cluster - } - config.Clusters = resolvedClusters - - resolvedAuthInfos := make(map[string]*clientcmdapi.AuthInfo) - for key, authInfo := range config.AuthInfos { - authInfo.ClientCertificate = resolveLocalPath(configDir, authInfo.ClientCertificate) - authInfo.ClientKey = resolveLocalPath(configDir, authInfo.ClientKey) - resolvedAuthInfos[key] = authInfo - } - config.AuthInfos = resolvedAuthInfos - - return nil -} - -// resolveLocalPath makes the path absolute with respect to the startingDir -func resolveLocalPath(startingDir, path string) string { - if len(path) == 0 { - return path - } - if filepath.IsAbs(path) { - return path - } - - return filepath.Join(startingDir, path) -} - // LoadFromFile takes a filename and deserializes the contents into Config object func LoadFromFile(filename string) (*clientcmdapi.Config, error) { kubeconfigBytes, err := ioutil.ReadFile(filename) @@ -335,3 +291,159 @@ func Write(config clientcmdapi.Config) ([]byte, error) { func (rules ClientConfigLoadingRules) ResolvePaths() bool { return !rules.DoNotResolvePaths } + +// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin +// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without +// modification of its contents. +func ResolveLocalPaths(config *clientcmdapi.Config) error { + for _, cluster := range config.Clusters { + if len(cluster.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + } + for _, authInfo := range config.AuthInfos { + if len(authInfo.LocationOfOrigin) == 0 { + continue + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + } + + return nil +} + +// RelativizeClusterLocalPaths first absolutizes the paths by calling ResolveLocalPaths. 
This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeClusterLocalPaths(cluster *clientcmdapi.Cluster) error { + if len(cluster.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %s", cluster.Server) + } + base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetClusterFileReferences(cluster), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetClusterFileReferences(cluster), base); err != nil { + return err + } + + return nil +} + +// RelativizeAuthInfoLocalPaths first absolutizes the paths by calling ResolveLocalPaths. This assumes that any NEW path is already +// absolute, but any existing path will be resolved relative to LocationOfOrigin +func RelativizeAuthInfoLocalPaths(authInfo *clientcmdapi.AuthInfo) error { + if len(authInfo.LocationOfOrigin) == 0 { + return fmt.Errorf("no location of origin for %v", authInfo) + } + base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin)) + if err != nil { + return fmt.Errorf("could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err) + } + + if err := ResolvePaths(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + if err := RelativizePathWithNoBacksteps(GetAuthInfoFileReferences(authInfo), base); err != nil { + return err + } + + return nil +} + +func RelativizeConfigPaths(config *clientcmdapi.Config, base string) error { + return RelativizePathWithNoBacksteps(GetConfigFileReferences(config), base) +} + +func ResolveConfigPaths(config *clientcmdapi.Config, base string) error { + return ResolvePaths(GetConfigFileReferences(config), base) +} + +func GetConfigFileReferences(config *clientcmdapi.Config) []*string { + refs := []*string{} + + for _, cluster := range config.Clusters { + refs = append(refs, GetClusterFileReferences(cluster)...) + } + for _, authInfo := range config.AuthInfos { + refs = append(refs, GetAuthInfoFileReferences(authInfo)...) + } + + return refs +} + +func GetClusterFileReferences(cluster *clientcmdapi.Cluster) []*string { + return []*string{&cluster.CertificateAuthority} +} + +func GetAuthInfoFileReferences(authInfo *clientcmdapi.AuthInfo) []*string { + return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey} +} + +// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory +func ResolvePaths(refs []*string, base string) error { + for _, ref := range refs { + // Don't resolve empty paths + if len(*ref) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(*ref) { + *ref = filepath.Join(base, *ref) + } + } + } + return nil +} + +// RelativizePathWithNoBacksteps updates the given refs to be relative paths, relative to the given base directory as long as they do not require backsteps. +// Any path requiring a backstep is left as-is as long it is absolute. 
Any non-absolute path that can't be relativized produces an error +func RelativizePathWithNoBacksteps(refs []*string, base string) error { + for _, ref := range refs { + // Don't relativize empty paths + if len(*ref) > 0 { + rel, err := MakeRelative(*ref, base) + if err != nil { + return err + } + + // if we have a backstep, don't mess with the path + if strings.HasPrefix(rel, "../") { + if filepath.IsAbs(*ref) { + continue + } + + return fmt.Errorf("%v requires backsteps and is not absolute", *ref) + } + + *ref = rel + } + } + return nil +} + +func MakeRelative(path, base string) (string, error) { + if len(path) > 0 { + rel, err := filepath.Rel(base, path) + if err != nil { + return path, err + } + return rel, nil + } + return path, nil +} diff --git a/pkg/kubectl/cmd/config/config.go b/pkg/kubectl/cmd/config/config.go index fbf9d5d3e30..9876f48dade 100644 --- a/pkg/kubectl/cmd/config/config.go +++ b/pkg/kubectl/cmd/config/config.go @@ -187,8 +187,9 @@ func (o *PathOptions) GetExplicitFile() string { // uses the default destination file to write the results into. This results in multiple file reads, but it's very easy to follow. // Preferences and CurrentContext should always be set in the default destination file. Since we can't distinguish between empty and missing values // (no nil strings), we're forced have separate handling for them. In the kubeconfig cases, newConfig should have at most one difference, -// that means that this code will only write into a single file. -func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) error { +// that means that this code will only write into a single file. If you want to relativizePaths, you must provide a fully qualified path in any +// modified element. +func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config, relativizePaths bool) error { startingConfig, err := configAccess.GetStartingConfig() if err != nil { return err @@ -223,7 +224,14 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) erro } configToWrite := getConfigFromFileOrDie(destinationFile) - configToWrite.Clusters[key] = cluster + t := *cluster + configToWrite.Clusters[key] = &t + configToWrite.Clusters[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := clientcmd.RelativizeClusterLocalPaths(configToWrite.Clusters[key]); err != nil { + return err + } + } if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { return err @@ -257,7 +265,14 @@ func ModifyConfig(configAccess ConfigAccess, newConfig clientcmdapi.Config) erro } configToWrite := getConfigFromFileOrDie(destinationFile) - configToWrite.AuthInfos[key] = authInfo + t := *authInfo + configToWrite.AuthInfos[key] = &t + configToWrite.AuthInfos[key].LocationOfOrigin = destinationFile + if relativizePaths { + if err := clientcmd.RelativizeAuthInfoLocalPaths(configToWrite.AuthInfos[key]); err != nil { + return err + } + } if err := clientcmd.WriteToFile(*configToWrite, destinationFile); err != nil { return err diff --git a/pkg/kubectl/cmd/config/config_test.go b/pkg/kubectl/cmd/config/config_test.go index 1bafbf0e4a3..5e25b3e53c6 100644 --- a/pkg/kubectl/cmd/config/config_test.go +++ b/pkg/kubectl/cmd/config/config_test.go @@ -21,6 +21,7 @@ import ( "fmt" "io/ioutil" "os" + "path" "reflect" "strings" "testing" @@ -79,10 +80,9 @@ func TestSetCurrentContext(t *testing.T) { startingConfig := newRedFederalCowHammerConfig() newContextName := "the-new-context" - newContext := clientcmdapi.NewContext() - 
startingConfig.Contexts[newContextName] = *newContext - expectedConfig.Contexts[newContextName] = *newContext + startingConfig.Contexts[newContextName] = clientcmdapi.NewContext() + expectedConfig.Contexts[newContextName] = clientcmdapi.NewContext() expectedConfig.CurrentContext = newContextName @@ -287,13 +287,15 @@ func TestEmbedNoKeyOrCertDisallowed(t *testing.T) { } func TestEmptyTokenAndCertAllowed(t *testing.T) { + fakeCertFile, _ := ioutil.TempFile("", "cert-file") + expectedConfig := newRedFederalCowHammerConfig() authInfo := clientcmdapi.NewAuthInfo() - authInfo.ClientCertificate = "cert-file" + authInfo.ClientCertificate = path.Base(fakeCertFile.Name()) expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ - args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert-file", "--" + clientcmd.FlagBearerToken + "="}, + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=" + fakeCertFile.Name(), "--" + clientcmd.FlagBearerToken + "="}, startingConfig: newRedFederalCowHammerConfig(), expectedConfig: expectedConfig, } @@ -305,10 +307,10 @@ func TestTokenAndCertAllowed(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() authInfo := clientcmdapi.NewAuthInfo() authInfo.Token = "token" - authInfo.ClientCertificate = "cert-file" + authInfo.ClientCertificate = "/cert-file" expectedConfig.AuthInfos["another-user"] = authInfo test := configCommandTest{ - args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert-file", "--" + clientcmd.FlagBearerToken + "=token"}, + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=/cert-file", "--" + clientcmd.FlagBearerToken + "=token"}, startingConfig: newRedFederalCowHammerConfig(), expectedConfig: expectedConfig, } @@ -409,8 +411,8 @@ func TestCertLeavesToken(t *testing.T) { authInfoWithTokenAndCerts := clientcmdapi.NewAuthInfo() authInfoWithTokenAndCerts.Token = "token" - authInfoWithTokenAndCerts.ClientCertificate = "cert" - authInfoWithTokenAndCerts.ClientKey = "key" + authInfoWithTokenAndCerts.ClientCertificate = "/cert" + authInfoWithTokenAndCerts.ClientKey = "/key" startingConfig := newRedFederalCowHammerConfig() startingConfig.AuthInfos["another-user"] = authInfoWithToken @@ -419,7 +421,7 @@ func TestCertLeavesToken(t *testing.T) { expectedConfig.AuthInfos["another-user"] = authInfoWithTokenAndCerts test := configCommandTest{ - args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=cert", "--" + clientcmd.FlagKeyFile + "=key"}, + args: []string{"set-credentials", "another-user", "--" + clientcmd.FlagCertFile + "=/cert", "--" + clientcmd.FlagKeyFile + "=/key"}, startingConfig: startingConfig, expectedConfig: expectedConfig, } @@ -428,11 +430,13 @@ func TestCertLeavesToken(t *testing.T) { } func TestCAClearsInsecure(t *testing.T) { + fakeCAFile, _ := ioutil.TempFile("", "ca-file") + clusterInfoWithInsecure := clientcmdapi.NewCluster() clusterInfoWithInsecure.InsecureSkipTLSVerify = true clusterInfoWithCA := clientcmdapi.NewCluster() - clusterInfoWithCA.CertificateAuthority = "cafile" + clusterInfoWithCA.CertificateAuthority = path.Base(fakeCAFile.Name()) startingConfig := newRedFederalCowHammerConfig() startingConfig.Clusters["another-cluster"] = clusterInfoWithInsecure @@ -441,7 +445,7 @@ func TestCAClearsInsecure(t *testing.T) { expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA test := configCommandTest{ - args: []string{"set-cluster", 
"another-cluster", "--" + clientcmd.FlagCAFile + "=cafile"}, + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=" + fakeCAFile.Name()}, startingConfig: startingConfig, expectedConfig: expectedConfig, } @@ -454,7 +458,7 @@ func TestCAClearsCAData(t *testing.T) { clusterInfoWithCAData.CertificateAuthorityData = []byte("cadata") clusterInfoWithCA := clientcmdapi.NewCluster() - clusterInfoWithCA.CertificateAuthority = "cafile" + clusterInfoWithCA.CertificateAuthority = "/cafile" startingConfig := newRedFederalCowHammerConfig() startingConfig.Clusters["another-cluster"] = clusterInfoWithCAData @@ -463,7 +467,7 @@ func TestCAClearsCAData(t *testing.T) { expectedConfig.Clusters["another-cluster"] = clusterInfoWithCA test := configCommandTest{ - args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=cafile", "--" + clientcmd.FlagInsecure + "=false"}, + args: []string{"set-cluster", "another-cluster", "--" + clientcmd.FlagCAFile + "=/cafile", "--" + clientcmd.FlagInsecure + "=false"}, startingConfig: startingConfig, expectedConfig: expectedConfig, } @@ -547,10 +551,10 @@ func TestCAAndInsecureDisallowed(t *testing.T) { func TestMergeExistingAuth(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() authInfo := expectedConfig.AuthInfos["red-user"] - authInfo.ClientKey = "key" + authInfo.ClientKey = "/key" expectedConfig.AuthInfos["red-user"] = authInfo test := configCommandTest{ - args: []string{"set-credentials", "red-user", "--" + clientcmd.FlagKeyFile + "=key"}, + args: []string{"set-credentials", "red-user", "--" + clientcmd.FlagKeyFile + "=/key"}, startingConfig: newRedFederalCowHammerConfig(), expectedConfig: expectedConfig, } @@ -574,12 +578,12 @@ func TestAdditionalCluster(t *testing.T) { expectedConfig := newRedFederalCowHammerConfig() cluster := clientcmdapi.NewCluster() cluster.APIVersion = testapi.Version() - cluster.CertificateAuthority = "ca-location" + cluster.CertificateAuthority = "/ca-location" cluster.InsecureSkipTLSVerify = false cluster.Server = "serverlocation" expectedConfig.Clusters["different-cluster"] = cluster test := configCommandTest{ - args: []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=ca-location", "--" + clientcmd.FlagAPIVersion + "=" + testapi.Version()}, + args: []string{"set-cluster", "different-cluster", "--" + clientcmd.FlagAPIServer + "=serverlocation", "--" + clientcmd.FlagInsecure + "=false", "--" + clientcmd.FlagCAFile + "=/ca-location", "--" + clientcmd.FlagAPIVersion + "=" + testapi.Version()}, startingConfig: newRedFederalCowHammerConfig(), expectedConfig: expectedConfig, } diff --git a/pkg/kubectl/cmd/config/create_authinfo.go b/pkg/kubectl/cmd/config/create_authinfo.go index e5276c29fc0..8d4811a9ff1 100644 --- a/pkg/kubectl/cmd/config/create_authinfo.go +++ b/pkg/kubectl/cmd/config/create_authinfo.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "io/ioutil" + "path/filepath" "strings" "github.com/spf13/cobra" @@ -115,7 +116,7 @@ func (o createAuthInfoOptions) run() error { authInfo := o.modifyAuthInfo(*startingStanza) config.AuthInfos[o.name] = &authInfo - if err := ModifyConfig(o.configAccess, *config); err != nil { + if err := ModifyConfig(o.configAccess, *config, true); err != nil { return err } @@ -134,6 +135,7 @@ func (o *createAuthInfoOptions) modifyAuthInfo(existingAuthInfo clientcmdapi.Aut modifiedAuthInfo.ClientCertificateData, _ = ioutil.ReadFile(certPath) 
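The `filepath.Abs` calls added in the non-embed branches just below normalize flag-supplied certificate, key, and CA paths before they are stored, so that `RelativizeAuthInfoLocalPaths` and `RelativizeClusterLocalPaths` can later rewrite them relative to the kubeconfig's own directory. A minimal sketch of that absolutize-then-relativize round trip; the directory and file names here are hypothetical, not taken from the patch:

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	kubeconfigDir := "/home/user/.kube" // hypothetical stanza LocationOfOrigin directory
	caFlag := "certs/ca.crt"            // hypothetical relative value typed on the command line

	abs, err := filepath.Abs(caFlag) // stored absolute, as the patched create_* commands do
	if err != nil {
		panic(err)
	}
	rel, err := filepath.Rel(kubeconfigDir, abs) // relativized on write, as RelativizeClusterLocalPaths does
	if err != nil {
		panic(err)
	}
	fmt.Println(abs)
	fmt.Println(rel)
	// Per RelativizePathWithNoBacksteps earlier in loader.go, a result that
	// begins with "../" is left as the absolute path rather than written
	// with backsteps.
}
```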
modifiedAuthInfo.ClientCertificate = "" } else { + certPath, _ = filepath.Abs(certPath) modifiedAuthInfo.ClientCertificate = certPath if len(modifiedAuthInfo.ClientCertificate) > 0 { modifiedAuthInfo.ClientCertificateData = nil @@ -146,6 +148,7 @@ func (o *createAuthInfoOptions) modifyAuthInfo(existingAuthInfo clientcmdapi.Aut modifiedAuthInfo.ClientKeyData, _ = ioutil.ReadFile(keyPath) modifiedAuthInfo.ClientKey = "" } else { + keyPath, _ = filepath.Abs(keyPath) modifiedAuthInfo.ClientKey = keyPath if len(modifiedAuthInfo.ClientKey) > 0 { modifiedAuthInfo.ClientKeyData = nil diff --git a/pkg/kubectl/cmd/config/create_cluster.go b/pkg/kubectl/cmd/config/create_cluster.go index 2a2750e18db..063bb2c98b2 100644 --- a/pkg/kubectl/cmd/config/create_cluster.go +++ b/pkg/kubectl/cmd/config/create_cluster.go @@ -21,6 +21,7 @@ import ( "fmt" "io" "io/ioutil" + "path/filepath" "github.com/spf13/cobra" @@ -101,7 +102,7 @@ func (o createClusterOptions) run() error { cluster := o.modifyCluster(*startingStanza) config.Clusters[o.name] = &cluster - if err := ModifyConfig(o.configAccess, *config); err != nil { + if err := ModifyConfig(o.configAccess, *config, true); err != nil { return err } @@ -133,6 +134,7 @@ func (o *createClusterOptions) modifyCluster(existingCluster clientcmdapi.Cluste modifiedCluster.InsecureSkipTLSVerify = false modifiedCluster.CertificateAuthority = "" } else { + caPath, _ = filepath.Abs(caPath) modifiedCluster.CertificateAuthority = caPath // Specifying a certificate authority file clears certificate authority data and insecure mode if caPath != "" { diff --git a/pkg/kubectl/cmd/config/create_context.go b/pkg/kubectl/cmd/config/create_context.go index d4b93dd9c25..fe7874f1200 100644 --- a/pkg/kubectl/cmd/config/create_context.go +++ b/pkg/kubectl/cmd/config/create_context.go @@ -88,7 +88,7 @@ func (o createContextOptions) run() error { context := o.modifyContext(*startingStanza) config.Contexts[o.name] = &context - if err := ModifyConfig(o.configAccess, *config); err != nil { + if err := ModifyConfig(o.configAccess, *config, true); err != nil { return err } diff --git a/pkg/kubectl/cmd/config/set.go b/pkg/kubectl/cmd/config/set.go index 8f68b9cfde8..585e87c599e 100644 --- a/pkg/kubectl/cmd/config/set.go +++ b/pkg/kubectl/cmd/config/set.go @@ -82,7 +82,7 @@ func (o setOptions) run() error { return err } - if err := ModifyConfig(o.configAccess, *config); err != nil { + if err := ModifyConfig(o.configAccess, *config, false); err != nil { return err } diff --git a/pkg/kubectl/cmd/config/unset.go b/pkg/kubectl/cmd/config/unset.go index 3e607dc1021..ef418060b62 100644 --- a/pkg/kubectl/cmd/config/unset.go +++ b/pkg/kubectl/cmd/config/unset.go @@ -75,7 +75,7 @@ func (o unsetOptions) run() error { return err } - if err := ModifyConfig(o.configAccess, *config); err != nil { + if err := ModifyConfig(o.configAccess, *config, false); err != nil { return err } diff --git a/pkg/kubectl/cmd/config/use_context.go b/pkg/kubectl/cmd/config/use_context.go index 341c48b6a9e..409f64daf13 100644 --- a/pkg/kubectl/cmd/config/use_context.go +++ b/pkg/kubectl/cmd/config/use_context.go @@ -66,7 +66,7 @@ func (o useContextOptions) run() error { config.CurrentContext = o.contextName - if err := ModifyConfig(o.configAccess, *config); err != nil { + if err := ModifyConfig(o.configAccess, *config, true); err != nil { return err } From 0f9fdcafea5b6b5bb1124082bccbb278c180a69d Mon Sep 17 00:00:00 2001 From: Clayton Coleman Date: Thu, 30 Jul 2015 15:15:14 -0400 Subject: [PATCH 47/49] Revert "Revert "Improve 
conversion to support multiple packages"" This reverts commit 94a387d5d1c3c39008cc31f4ccaee74316f34e0d. --- cmd/genconversion/conversion.go | 13 +- cmd/gendeepcopy/deep_copy.go | 13 +- hack/update-generated-conversions.sh | 8 -- pkg/api/deep_copy_generated.go | 38 +++--- pkg/api/v1/conversion_generated.go | 55 ++++---- pkg/api/v1/deep_copy_generated.go | 36 ++--- pkg/runtime/conversion_generator.go | 177 +++++++++++++++++++++---- pkg/runtime/deep_copy_generator.go | 188 +++++++++++++++++++++------ 8 files changed, 389 insertions(+), 139 deletions(-) diff --git a/cmd/genconversion/conversion.go b/cmd/genconversion/conversion.go index c25fb0d0aad..c60b8f1dacd 100644 --- a/cmd/genconversion/conversion.go +++ b/cmd/genconversion/conversion.go @@ -17,13 +17,16 @@ limitations under the License. package main import ( + "fmt" "io" "os" + "path" "runtime" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -50,7 +53,9 @@ func main() { funcOut = file } - generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw()) + generator := pkg_runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", *version)) + apiShort := generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") + generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource") // TODO(wojtek-t): Change the overwrites to a flag. generator.OverwritePackage(*version, "") for _, knownType := range api.Scheme.KnownTypes(*version) { @@ -58,10 +63,14 @@ func main() { glog.Errorf("error while generating conversion functions for %v: %v", knownType, err) } } + generator.RepackImports(util.NewStringSet()) + if err := generator.WriteImports(funcOut); err != nil { + glog.Fatalf("error while writing imports: %v", err) + } if err := generator.WriteConversionFunctions(funcOut); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } - if err := generator.RegisterConversionFunctions(funcOut); err != nil { + if err := generator.RegisterConversionFunctions(funcOut, fmt.Sprintf("%s.Scheme", apiShort)); err != nil { glog.Fatalf("Error while writing conversion functions: %v", err) } } diff --git a/cmd/gendeepcopy/deep_copy.go b/cmd/gendeepcopy/deep_copy.go index 59ac43bb72f..dcb1c33766d 100644 --- a/cmd/gendeepcopy/deep_copy.go +++ b/cmd/gendeepcopy/deep_copy.go @@ -19,12 +19,14 @@ package main import ( "io" "os" + "path" "runtime" "strings" "github.com/GoogleCloudPlatform/kubernetes/pkg/api" _ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1" pkg_runtime "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime" + "github.com/GoogleCloudPlatform/kubernetes/pkg/util" "github.com/golang/glog" flag "github.com/spf13/pflag" @@ -53,10 +55,14 @@ func main() { } knownVersion := *version + registerTo := "api.Scheme" if knownVersion == "api" { knownVersion = api.Scheme.Raw().InternalVersion + registerTo = "Scheme" } - generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw()) + pkgPath := path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", knownVersion) + generator := pkg_runtime.NewDeepCopyGenerator(api.Scheme.Raw(), pkgPath, util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes")) + generator.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api") for _, overwrite := range 
strings.Split(*overwrites, ",") { vals := strings.Split(overwrite, "=") @@ -67,13 +73,14 @@ func main() { glog.Errorf("error while generating deep copy functions for %v: %v", knownType, err) } } - if err := generator.WriteImports(funcOut, *version); err != nil { + generator.RepackImports() + if err := generator.WriteImports(funcOut); err != nil { glog.Fatalf("error while writing imports: %v", err) } if err := generator.WriteDeepCopyFunctions(funcOut); err != nil { glog.Fatalf("error while writing deep copy functions: %v", err) } - if err := generator.RegisterDeepCopyFunctions(funcOut, *version); err != nil { + if err := generator.RegisterDeepCopyFunctions(funcOut, registerTo); err != nil { glog.Fatalf("error while registering deep copy functions: %v", err) } } diff --git a/hack/update-generated-conversions.sh b/hack/update-generated-conversions.sh index d7b9f9fd591..7df31ca18f8 100755 --- a/hack/update-generated-conversions.sh +++ b/hack/update-generated-conversions.sh @@ -33,14 +33,6 @@ function generate_version() { cat >> $TMPFILE < 0 { + name = dirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 { + name = subdirname + name + if _, ok := g.shortImports[name]; !ok { + g.imports[pkg] = name + g.shortImports[name] = pkg + return name + } + } + } + for i := 2; i < 100; i++ { + generatedName := fmt.Sprintf("%s%d", name, i) + if _, ok := g.shortImports[generatedName]; !ok { + g.imports[pkg] = generatedName + g.shortImports[generatedName] = pkg + return generatedName + } + } + panic(fmt.Sprintf("unable to find a unique name for the package path %q: %v", pkg, g.shortImports)) +} + func (g *conversionGenerator) typeName(inType reflect.Type) string { switch inType.Kind() { - case reflect.Map: - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) + case reflect.Map: + if len(inType.Name()) == 0 { + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) + } + fallthrough default: - typeWithPkg := fmt.Sprintf("%s", inType) - slices := strings.Split(typeWithPkg, ".") - if len(slices) == 1 { + pkg, name := inType.PkgPath(), inType.Name() + if len(name) == 0 && inType.Kind() == reflect.Struct { + return "struct{}" + } + if len(pkg) == 0 { // Default package. - return slices[0] + return name } - if len(slices) == 2 { - pkg := slices[0] - if val, found := g.pkgOverwrites[pkg]; found { - pkg = val - } - if pkg != "" { - pkg = pkg + "." - } - return pkg + slices[1] + if val, found := g.pkgOverwrites[pkg]; found { + pkg = val } - panic("Incorrect type name: " + typeWithPkg) + if len(pkg) == 0 { + return name + } + short := g.addImportByPath(pkg) + if len(short) > 0 { + return fmt.Sprintf("%s.%s", short, name) + } + return name } } @@ -658,6 +785,10 @@ func (g *conversionGenerator) existsDedicatedConversionFunction(inType, outType // unnamed. Thus we return false here. 
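// Editorial aside, not part of the patch: the addImportByPath helper added
// above resolves short-name collisions deterministically. A worked example,
// with hypothetical package paths, following the code above:
//
//	addImportByPath(".../pkg/api")   -> "api"      (path.Base is still free)
//	addImportByPath(".../third/api") -> "thirdapi" ("api" is taken, so the
//	                                    parent directory name is prefixed)
//	addImportByPath(".../a/b/api")   -> "abapi"    when "api" and "bapi" are
//	                                    both taken (grandparent + parent + base)
//
// If all three candidates collide, numeric suffixes ("…2" through "…99") are
// appended to the last candidate, and beyond that the generator panics.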
return false
 	}
+	// TODO: no way to handle private conversions in different packages
+	if g.assumePrivateConversions {
+		return false
+	}
 	return g.scheme.Converter().HasConversionFunc(inType, outType)
 }
 
diff --git a/pkg/runtime/deep_copy_generator.go b/pkg/runtime/deep_copy_generator.go
index 7be7af6bd86..20d931caa0a 100644
--- a/pkg/runtime/deep_copy_generator.go
+++ b/pkg/runtime/deep_copy_generator.go
@@ -19,6 +19,7 @@ package runtime
 import (
 	"fmt"
 	"io"
+	"path"
 	"reflect"
 	"sort"
 	"strings"
@@ -38,9 +39,20 @@ type DeepCopyGenerator interface {
 	// functions for this type and all nested types will be generated.
 	AddType(inType reflect.Type) error
 
+	// ReplaceType registers a type that should be used instead of the type
+	// with the provided pkgPath and name.
+	ReplaceType(pkgPath, name string, in interface{})
+
+	// AddImport registers a package name with the generator and returns its
+	// short name.
+	AddImport(pkgPath string) string
+
+	// RepackImports creates a stable ordering of import short names
+	RepackImports()
+
 	// Writes all imports that are necessary for deep-copy function and
 	// their registration.
-	WriteImports(w io.Writer, pkg string) error
+	WriteImports(w io.Writer) error
 
 	// Writes deep-copy functions for all types added via AddType() method
 	// and their nested types.
@@ -57,20 +69,80 @@ type DeepCopyGenerator interface {
 	OverwritePackage(pkg, overwrite string)
 }
 
-func NewDeepCopyGenerator(scheme *conversion.Scheme) DeepCopyGenerator {
-	return &deepCopyGenerator{
+func NewDeepCopyGenerator(scheme *conversion.Scheme, targetPkg string, include util.StringSet) DeepCopyGenerator {
+	g := &deepCopyGenerator{
 		scheme:        scheme,
+		targetPkg:     targetPkg,
 		copyables:     make(map[reflect.Type]bool),
-		imports:       util.StringSet{},
+		imports:       make(map[string]string),
+		shortImports:  make(map[string]string),
 		pkgOverwrites: make(map[string]string),
+		replace:       make(map[pkgPathNamePair]reflect.Type),
+		include:       include,
 	}
+	g.targetPackage(targetPkg)
+	g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/conversion")
+	return g
+}
+
+type pkgPathNamePair struct {
+	PkgPath string
+	Name    string
 }
 
 type deepCopyGenerator struct {
-	scheme        *conversion.Scheme
-	copyables     map[reflect.Type]bool
-	imports       util.StringSet
+	scheme    *conversion.Scheme
+	targetPkg string
+	copyables map[reflect.Type]bool
+	// map of package names to shortname
+	imports map[string]string
+	// map of short names to package names
+	shortImports  map[string]string
 	pkgOverwrites map[string]string
+	replace       map[pkgPathNamePair]reflect.Type
+	include       util.StringSet
+}
+
+func (g *deepCopyGenerator) addImportByPath(pkg string) string {
+	if name, ok := g.imports[pkg]; ok {
+		return name
+	}
+	name := path.Base(pkg)
+	if _, ok := g.shortImports[name]; !ok {
+		g.imports[pkg] = name
+		g.shortImports[name] = pkg
+		return name
+	}
+	if dirname := path.Base(path.Dir(pkg)); len(dirname) > 0 {
+		name = dirname + name
+		if _, ok := g.shortImports[name]; !ok {
+			g.imports[pkg] = name
+			g.shortImports[name] = pkg
+			return name
+		}
+		if subdirname := path.Base(path.Dir(path.Dir(pkg))); len(subdirname) > 0 {
+			name = subdirname + name
+			if _, ok := g.shortImports[name]; !ok {
+				g.imports[pkg] = name
+				g.shortImports[name] = pkg
+				return name
+			}
+		}
+	}
+	for i := 2; i < 100; i++ {
+		generatedName := fmt.Sprintf("%s%d", name, i)
+		if _, ok := g.shortImports[generatedName]; !ok {
+			g.imports[pkg] = generatedName
+			g.shortImports[generatedName] = pkg
+			return generatedName
+		}
+	}
+	panic(fmt.Sprintf("unable to find a unique name for the 
package path %q: %v", pkg, g.shortImports)) +} + +func (g *deepCopyGenerator) targetPackage(pkg string) { + g.imports[pkg] = "" + g.shortImports[""] = pkg } func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { @@ -90,11 +162,18 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return err } case reflect.Interface: - g.imports.Insert(inType.PkgPath()) + g.addImportByPath(inType.PkgPath()) return nil case reflect.Struct: - g.imports.Insert(inType.PkgPath()) - if !strings.HasPrefix(inType.PkgPath(), "github.com/GoogleCloudPlatform/kubernetes") { + g.addImportByPath(inType.PkgPath()) + found := false + for s := range g.include { + if strings.HasPrefix(inType.PkgPath(), s) { + found = true + break + } + } + if !found { return nil } for i := 0; i < inType.NumField(); i++ { @@ -110,6 +189,15 @@ func (g *deepCopyGenerator) addAllRecursiveTypes(inType reflect.Type) error { return nil } +func (g *deepCopyGenerator) AddImport(pkg string) string { + return g.addImportByPath(pkg) +} + +// ReplaceType registers a replacement type to be used instead of the named type +func (g *deepCopyGenerator) ReplaceType(pkgPath, name string, t interface{}) { + g.replace[pkgPathNamePair{pkgPath, name}] = reflect.TypeOf(t) +} + func (g *deepCopyGenerator) AddType(inType reflect.Type) error { if inType.Kind() != reflect.Struct { return fmt.Errorf("non-struct copies are not supported") @@ -117,10 +205,23 @@ func (g *deepCopyGenerator) AddType(inType reflect.Type) error { return g.addAllRecursiveTypes(inType) } -func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { +func (g *deepCopyGenerator) RepackImports() { + var packages []string + for key := range g.imports { + packages = append(packages, key) + } + sort.Strings(packages) + g.imports = make(map[string]string) + g.shortImports = make(map[string]string) + + g.targetPackage(g.targetPkg) + for _, pkg := range packages { + g.addImportByPath(pkg) + } +} + +func (g *deepCopyGenerator) WriteImports(w io.Writer) error { var packages []string - packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/api") - packages = append(packages, "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion") for key := range g.imports { packages = append(packages, key) } @@ -130,10 +231,13 @@ func (g *deepCopyGenerator) WriteImports(w io.Writer, pkg string) error { indent := 0 buffer.addLine("import (\n", indent) for _, importPkg := range packages { - if strings.HasSuffix(importPkg, pkg) { + if len(importPkg) == 0 { continue } - buffer.addLine(fmt.Sprintf("\"%s\"\n", importPkg), indent+1) + if len(g.imports[importPkg]) == 0 { + continue + } + buffer.addLine(fmt.Sprintf("%s \"%s\"\n", g.imports[importPkg], importPkg), indent+1) } buffer.addLine(")\n", indent) buffer.addLine("\n", indent) @@ -159,35 +263,47 @@ func (s byPkgAndName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (g *deepCopyGenerator) typeName(inType reflect.Type) string { +func (g *deepCopyGenerator) nameForType(inType reflect.Type) string { switch inType.Kind() { - case reflect.Map: - return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) case reflect.Slice: return fmt.Sprintf("[]%s", g.typeName(inType.Elem())) case reflect.Ptr: return fmt.Sprintf("*%s", g.typeName(inType.Elem())) + case reflect.Map: + if len(inType.Name()) == 0 { + return fmt.Sprintf("map[%s]%s", g.typeName(inType.Key()), g.typeName(inType.Elem())) + } + fallthrough default: - typeWithPkg := fmt.Sprintf("%s", inType) - slices := 
strings.Split(typeWithPkg, ".")
-	if len(slices) == 1 {
+	pkg, name := inType.PkgPath(), inType.Name()
+	if len(name) == 0 && inType.Kind() == reflect.Struct {
+		return "struct{}"
+	}
+	if len(pkg) == 0 {
 		// Default package.
-		return slices[0]
+		return name
 	}
-	if len(slices) == 2 {
-		pkg := slices[0]
-		if val, found := g.pkgOverwrites[pkg]; found {
-			pkg = val
-		}
-		if pkg != "" {
-			pkg = pkg + "."
-		}
-		return pkg + slices[1]
+	if val, found := g.pkgOverwrites[pkg]; found {
+		pkg = val
 	}
-	panic("Incorrect type name: " + typeWithPkg)
+	if len(pkg) == 0 {
+		return name
+	}
+	short := g.addImportByPath(pkg)
+	if len(short) > 0 {
+		return fmt.Sprintf("%s.%s", short, name)
+	}
+	return name
 	}
 }
 
+func (g *deepCopyGenerator) typeName(inType reflect.Type) string {
+	if t, ok := g.replace[pkgPathNamePair{inType.PkgPath(), inType.Name()}]; ok {
+		return g.nameForType(t)
+	}
+	return g.nameForType(inType)
+}
+
 func (g *deepCopyGenerator) deepCopyFunctionName(inType reflect.Type) string {
 	funcNameFormat := "deepCopy_%s_%s"
 	inPkg := packageForName(inType)
@@ -442,12 +558,8 @@ func (g *deepCopyGenerator) writeDeepCopyForType(b *buffer, inType reflect.Type,
 func (g *deepCopyGenerator) writeRegisterHeader(b *buffer, pkg string, indent int) {
 	b.addLine("func init() {\n", indent)
-	registerFormat := "err := %sScheme.AddGeneratedDeepCopyFuncs(\n"
-	if pkg == "api" {
-		b.addLine(fmt.Sprintf(registerFormat, ""), indent+1)
-	} else {
-		b.addLine(fmt.Sprintf(registerFormat, "api."), indent+1)
-	}
+	registerFormat := "err := %s.AddGeneratedDeepCopyFuncs(\n"
+	b.addLine(fmt.Sprintf(registerFormat, pkg), indent+1)
 }
 
 func (g *deepCopyGenerator) writeRegisterFooter(b *buffer, indent int) {

From 532e1ca20cae5702705daf11044762cc605a4035 Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Thu, 30 Jul 2015 14:45:27 -0400
Subject: [PATCH 48/49] Update generator tests to pass

---
 pkg/runtime/conversion_generation_test.go | 18 ++++++++++++------
 pkg/runtime/deep_copy_generation_test.go  | 18 +++++++++++++-----
 2 files changed, 25 insertions(+), 11 deletions(-)

diff --git a/pkg/runtime/conversion_generation_test.go b/pkg/runtime/conversion_generation_test.go
index e2ce4f50256..1fd0c4da026 100644
--- a/pkg/runtime/conversion_generation_test.go
+++ b/pkg/runtime/conversion_generation_test.go
@@ -23,31 +23,37 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path"
 	"testing"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
 	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
-
-	"github.com/golang/glog"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 )
 
 func generateConversions(t *testing.T, version string) bytes.Buffer {
-	g := runtime.NewConversionGenerator(api.Scheme.Raw())
+	g := runtime.NewConversionGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", version))
+	apiShort := g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
+	g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource")
+	// TODO(wojtek-t): Change the overwrites to a flag.
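// Editorial aside, not part of the patch: the test below drives the same call
// sequence that the updated cmd/genconversion main uses. A sketch of the
// post-patch generator API, with names taken from this diff (targetPkg stands
// in for the path.Join(...) expression above):
//
//	g := runtime.NewConversionGenerator(api.Scheme.Raw(), targetPkg)
//	apiShort := g.AddImport(".../pkg/api")            // returns the short name
//	g.GenerateConversionsForType(version, knownType)  // once per known type
//	g.RepackImports(util.NewStringSet())              // stabilize short import names
//	g.WriteImports(w)                                 // emit the import block first
//	g.WriteConversionFunctions(w)                     // then the function bodies
//	g.RegisterConversionFunctions(w, fmt.Sprintf("%s.Scheme", apiShort))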
 	g.OverwritePackage(version, "")
 	for _, knownType := range api.Scheme.KnownTypes(version) {
 		if err := g.GenerateConversionsForType(version, knownType); err != nil {
-			glog.Errorf("error while generating conversion functions for %v: %v", knownType, err)
+			t.Fatalf("error while generating conversion functions for %v: %v", knownType, err)
 		}
 	}
-
+	g.RepackImports(util.NewStringSet())
 	var functions bytes.Buffer
 	functionsWriter := bufio.NewWriter(&functions)
+	if err := g.WriteImports(functionsWriter); err != nil {
+		t.Fatalf("error while writing imports: %v", err)
+	}
 	if err := g.WriteConversionFunctions(functionsWriter); err != nil {
 		t.Fatalf("couldn't generate conversion functions: %v", err)
 	}
-	if err := g.RegisterConversionFunctions(functionsWriter); err != nil {
+	if err := g.RegisterConversionFunctions(functionsWriter, fmt.Sprintf("%s.Scheme", apiShort)); err != nil {
 		t.Fatalf("couldn't generate conversion function names: %v", err)
 	}
 	if err := functionsWriter.Flush(); err != nil {
diff --git a/pkg/runtime/deep_copy_generation_test.go b/pkg/runtime/deep_copy_generation_test.go
index e7cd18b021a..06a042ecc44 100644
--- a/pkg/runtime/deep_copy_generation_test.go
+++ b/pkg/runtime/deep_copy_generation_test.go
@@ -22,23 +22,30 @@ import (
 	"fmt"
 	"io/ioutil"
 	"os"
+	"path"
 	"testing"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/testapi"
 	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 
 	"github.com/golang/glog"
 )
 
 func generateDeepCopies(t *testing.T, version string) bytes.Buffer {
-	g := runtime.NewDeepCopyGenerator(api.Scheme.Raw())
-	g.OverwritePackage(version, "")
 	testedVersion := version
-	if version == "api" {
+	registerTo := "api.Scheme"
+	if testedVersion == "api" {
 		testedVersion = api.Scheme.Raw().InternalVersion
+		registerTo = "Scheme"
 	}
+
+	g := runtime.NewDeepCopyGenerator(api.Scheme.Raw(), path.Join("github.com/GoogleCloudPlatform/kubernetes/pkg/api", testedVersion), util.NewStringSet("github.com/GoogleCloudPlatform/kubernetes"))
+	g.AddImport("github.com/GoogleCloudPlatform/kubernetes/pkg/api")
+	g.OverwritePackage(version, "")
+
 	for _, knownType := range api.Scheme.KnownTypes(testedVersion) {
 		if err := g.AddType(knownType); err != nil {
 			glog.Errorf("error while generating deep-copy functions for %v: %v", knownType, err)
@@ -47,13 +54,14 @@ func generateDeepCopies(t *testing.T, version string) bytes.Buffer {
 	var functions bytes.Buffer
 	functionsWriter := bufio.NewWriter(&functions)
-	if err := g.WriteImports(functionsWriter, version); err != nil {
+	g.RepackImports()
+	if err := g.WriteImports(functionsWriter); err != nil {
 		t.Fatalf("couldn't generate deep-copy function imports: %v", err)
 	}
 	if err := g.WriteDeepCopyFunctions(functionsWriter); err != nil {
 		t.Fatalf("couldn't generate deep-copy functions: %v", err)
 	}
-	if err := g.RegisterDeepCopyFunctions(functionsWriter, version); err != nil {
+	if err := g.RegisterDeepCopyFunctions(functionsWriter, registerTo); err != nil {
 		t.Fatalf("couldn't generate deep-copy function names: %v", err)
 	}
 	if err := functionsWriter.Flush(); err != nil {

From 792849251af8e39032796ec131df37f2375002f9 Mon Sep 17 00:00:00 2001
From: Arsen Mamikonyan
Date: Tue, 28 Jul 2015 12:29:42 -0700
Subject: [PATCH 49/49] Allow setting --service-node-port-range option using a
 config variable

---
 cluster/ubuntu/config-default.sh | 2 ++
 cluster/ubuntu/util.sh           | 5 +++--
 2 files changed, 5
insertions(+), 2 deletions(-) diff --git a/cluster/ubuntu/config-default.sh b/cluster/ubuntu/config-default.sh index c857467a8fb..eb59dc242b2 100755 --- a/cluster/ubuntu/config-default.sh +++ b/cluster/ubuntu/config-default.sh @@ -39,6 +39,8 @@ export FLANNEL_OPTS=${FLANNEL_OPTS:-"Network": 172.16.0.0/16} # Admission Controllers to invoke prior to persisting objects in cluster export ADMISSION_CONTROL=NamespaceLifecycle,NamespaceExists,LimitRanger,ServiceAccount,ResourceQuota,SecurityContextDeny +SERVICE_NODE_PORT_RANGE=${SERVICE_NODE_PORT_RANGE:-"30000-32767"} + # Optional: Enable node logging. ENABLE_NODE_LOGGING=false LOGGING_DESTINATION=${LOGGING_DESTINATION:-elasticsearch} diff --git a/cluster/ubuntu/util.sh b/cluster/ubuntu/util.sh index 55d30d228b6..d04cd0c02e4 100755 --- a/cluster/ubuntu/util.sh +++ b/cluster/ubuntu/util.sh @@ -202,6 +202,7 @@ KUBE_APISERVER_OPTS="--insecure-bind-address=0.0.0.0 \ --logtostderr=true \ --service-cluster-ip-range=${1} \ --admission-control=${2} \ +--service-node-port-range=${3} \ --client-ca-file=/srv/kubernetes/ca.crt \ --tls-cert-file=/srv/kubernetes/server.cert \ --tls-private-key-file=/srv/kubernetes/server.key" @@ -371,7 +372,7 @@ function provision-master() { ~/kube/make-ca-cert ${MASTER_IP} IP:${MASTER_IP},IP:${SERVICE_CLUSTER_IP_RANGE%.*}.1,DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local; \ setClusterInfo; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ - create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \ + create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ create-flanneld-opts; \ @@ -413,7 +414,7 @@ function provision-masterandminion() { ssh $SSH_OPTS -t $MASTER "source ~/kube/util.sh; \ setClusterInfo; \ create-etcd-opts "${mm[${MASTER_IP}]}" "${MASTER_IP}" "${CLUSTER}"; \ - create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}"; \ + create-kube-apiserver-opts "${SERVICE_CLUSTER_IP_RANGE}" "${ADMISSION_CONTROL}" "${SERVICE_NODE_PORT_RANGE}"; \ create-kube-controller-manager-opts "${MINION_IPS}"; \ create-kube-scheduler-opts; \ create-kubelet-opts "${MASTER_IP}" "${MASTER_IP}" "${DNS_SERVER_IP}" "${DNS_DOMAIN}";
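# Editorial usage sketch, not part of the patch: with SERVICE_NODE_PORT_RANGE
# now threaded from config-default.sh into create-kube-apiserver-opts, the
# apiserver's NodePort range can be overridden from the environment before
# bringing the cluster up; unset, it defaults to 30000-32767. The kube-up.sh
# entry point and the ubuntu provider name follow the existing cluster-script
# conventions; the range shown is an arbitrary example.
#
#   $ export KUBERNETES_PROVIDER=ubuntu
#   $ export SERVICE_NODE_PORT_RANGE=25000-26000
#   $ cluster/kube-up.sh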