
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Adding e2e test for statefulsets for vSphere cloud provider

**What this PR does / why we need it**: This PR adds a new e2e test for statefulsets on the vSphere cloud provider. The test performs the following steps (a condensed sketch of the flow follows at the end of this description):

- Create a storage class with thin diskformat.
- Create an nginx service.
- Create an nginx statefulset with 3 replicas.
- Wait until all pods are ready and all PVCs are bound to PVs.
- Verify that the volumes are accessible in all statefulset pods by creating an empty file on each.
- Scale the statefulset down to 2 replicas.
- Scale the statefulset up to 3 replicas.
- Scale the statefulset down to 0 replicas and delete all pods.
- Delete all PVCs from the test namespace.
- Delete the storage class.

**Which issue this PR fixes**: fixes https://github.com/vmware/kubernetes/issues/275

**Special notes for your reviewer**:

Test logs:

```
root@k8s-dev-vm-02:~/divyenp/kubernetes# go run hack/e2e.go --check-version-skew=false --v --test --test_args='--ginkgo.focus=vsphere\sstatefulset\stesting'
flag provided but not defined: -check-version-skew
Usage of /tmp/go-build247641121/command-line-arguments/_obj/exe/e2e:
  -get
        go get -u kubetest if old or not installed (default true)
  -old duration
        Consider kubetest old if it exceeds this (default 24h0m0s)
2017/10/18 19:24:33 e2e.go:55: NOTICE: go run hack/e2e.go is now a shim for test-infra/kubetest
2017/10/18 19:24:33 e2e.go:56: Usage: go run hack/e2e.go [--get=true] [--old=24h0m0s] -- [KUBETEST_ARGS]
2017/10/18 19:24:33 e2e.go:57: The separator is required to use --get or --old flags
2017/10/18 19:24:33 e2e.go:58: The -- flag separator also suppresses this message
2017/10/18 19:24:33 e2e.go:77: Calling kubetest --check-version-skew=false --v --test --test_args=--ginkgo.focus=vsphere\sstatefulset\stesting...
2017/10/18 19:24:33 util.go:154: Running: ./cluster/kubectl.sh --match-server-version=false version
2017/10/18 19:24:34 util.go:156: Step './cluster/kubectl.sh --match-server-version=false version' finished in 290.682219ms
2017/10/18 19:24:34 util.go:154: Running: ./hack/e2e-internal/e2e-status.sh
Skeleton Provider: prepare-e2e not implemented
Client Version: version.Info{Major:"1", Minor:"9+", GitVersion:"v1.9.0-alpha.1.1217+8b041da0f996c1-dirty", GitCommit:"8b041da0f996c185438a7ed8282f92734a2ed0e7", GitTreeState:"dirty", BuildDate:"2017-10-19T00:46:00Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"9+", GitVersion:"v1.9.0-alpha.1.1293+d462bac7805f53", GitCommit:"d462bac7805f536a43c7d5fb98aca138ba1237eb", GitTreeState:"clean", BuildDate:"2017-10-18T07:07:08Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
2017/10/18 19:24:34 util.go:156: Step './hack/e2e-internal/e2e-status.sh' finished in 305.965323ms
2017/10/18 19:24:34 util.go:154: Running: ./hack/ginkgo-e2e.sh --ginkgo.focus=vsphere\sstatefulset\stesting
Conformance test: not doing test setup.
Oct 18 19:24:35.808: INFO: Overriding default scale value of zero to 1
Oct 18 19:24:35.808: INFO: Overriding default milliseconds value of zero to 5000
I1018 19:24:36.073718    7768 e2e.go:383] Starting e2e run "a63561de-b474-11e7-8f6b-0050569c26b8" on Ginkgo node 1
Running Suite: Kubernetes e2e suite
===================================
Random Seed: 1508379875 - Will randomize all specs
Will run 1 of 713 specs

Oct 18 19:24:36.132: INFO: >>> kubeConfig: /root/.kube/config
Oct 18 19:24:36.139: INFO: Waiting up to 4h0m0s for all (but 0) nodes to be schedulable
Oct 18 19:24:36.177: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Oct 18 19:24:36.321: INFO: 13 / 13 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Oct 18 19:24:36.321: INFO: expected 4 pod replicas in namespace 'kube-system', 4 are Running and Ready.
Oct 18 19:24:36.326: INFO: Waiting for pods to enter Success, but no pods in "kube-system" match label map[name:e2e-image-puller]
Oct 18 19:24:36.326: INFO: Dumping network health container logs from all nodes...
Oct 18 19:24:36.338: INFO: Client version: v1.9.0-alpha.1.1217+8b041da0f996c1-dirty
Oct 18 19:24:36.340: INFO: Server version: v1.9.0-alpha.1.1293+d462bac7805f53
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] vsphere statefulset
  vsphere statefulset testing
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:155
[BeforeEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:133
STEP: Creating a kubernetes client
Oct 18 19:24:36.349: INFO: >>> kubeConfig: /root/.kube/config
STEP: Building a namespace api object
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:63
[It] vsphere statefulset testing
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:155
STEP: Creating StorageClass for Statefulset
STEP: Creating statefulset
Oct 18 19:24:36.489: INFO: Parsing statefulset from test/e2e/testing-manifests/statefulset/nginx/statefulset.yaml
Oct 18 19:24:36.503: INFO: Parsing service from test/e2e/testing-manifests/statefulset/nginx/service.yaml
Oct 18 19:24:36.514: INFO: creating web service
Oct 18 19:24:36.527: INFO: creating statefulset e2e-tests-vsphere-statefulset-gnfmp/web with 3 replicas and selector &LabelSelector{MatchLabels:map[string]string{app: nginx,},MatchExpressions:[],}
Oct 18 19:24:36.561: INFO: Found 0 stateful pods, waiting for 3
Oct 18 19:24:46.567: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:24:56.568: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:06.568: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:16.566: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:26.567: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:36.567: INFO: Found 1 stateful pods, waiting for 3
Oct 18 19:25:46.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:25:56.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:06.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:16.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:26.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:36.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:46.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:26:56.571: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:06.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:16.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:26.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:36.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:46.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:27:56.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:06.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:16.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:26.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:36.574: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:46.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:28:56.571: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:06.569: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:16.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:26.566: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:36.568: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:46.566: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:29:56.567: INFO: Found 2 stateful pods, waiting for 3
Oct 18 19:30:06.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:06.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:06.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:16.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:16.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:16.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:26.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:26.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:26.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:36.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:36.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:36.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:46.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:46.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:46.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:30:56.566: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:56.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:30:56.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:06.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:06.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:06.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:16.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:16.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:16.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:26.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:26.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:26.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:36.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:36.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:36.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:46.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:46.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:46.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:31:56.568: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:56.568: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:31:56.568: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:32:06.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:06.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:06.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:32:16.571: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:16.571: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:16.571: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Pending - Ready=false
Oct 18 19:32:26.567: INFO: Waiting for pod web-0 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:26.567: INFO: Waiting for pod web-1 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:26.567: INFO: Waiting for pod web-2 to enter Running - Ready=true, currently Running - Ready=true
Oct 18 19:32:26.567: INFO: Waiting for statefulset status.replicas updated to 3
Oct 18 19:32:26.605: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-0 -- /bin/sh -c ls -idlh /usr/share/nginx/html'
Oct 18 19:32:27.170: INFO: stderr: ""
Oct 18 19:32:27.170: INFO: stdout of ls -idlh /usr/share/nginx/html on web-0:
2 drwxr-xr-x 3 root root 4.0K Oct 19 02:25 /usr/share/nginx/html
Oct 18 19:32:27.171: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-1 -- /bin/sh -c ls -idlh /usr/share/nginx/html'
Oct 18 19:32:27.687: INFO: stderr: ""
Oct 18 19:32:27.688: INFO: stdout of ls -idlh /usr/share/nginx/html on web-1:
2 drwxr-xr-x 3 root root 4.0K Oct 19 02:29 /usr/share/nginx/html
Oct 18 19:32:27.688: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-2 -- /bin/sh -c ls -idlh /usr/share/nginx/html'
Oct 18 19:32:28.177: INFO: stderr: ""
Oct 18 19:32:28.177: INFO: stdout of ls -idlh /usr/share/nginx/html on web-2:
2 drwxr-xr-x 3 root root 4.0K Oct 19 02:32 /usr/share/nginx/html
Oct 18 19:32:28.183: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-0 -- /bin/sh -c find /usr/share/nginx/html'
Oct 18 19:32:28.690: INFO: stderr: ""
Oct 18 19:32:28.690: INFO: stdout of find /usr/share/nginx/html on web-0:
/usr/share/nginx/html
/usr/share/nginx/html/lost+found
Oct 18 19:32:28.690: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-1 -- /bin/sh -c find /usr/share/nginx/html'
Oct 18 19:32:29.166: INFO: stderr: ""
Oct 18 19:32:29.166: INFO: stdout of find /usr/share/nginx/html on web-1:
/usr/share/nginx/html
/usr/share/nginx/html/lost+found
Oct 18 19:32:29.166: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-2 -- /bin/sh -c find /usr/share/nginx/html'
Oct 18 19:32:29.696: INFO: stderr: ""
Oct 18 19:32:29.696: INFO: stdout of find /usr/share/nginx/html on web-2:
/usr/share/nginx/html
/usr/share/nginx/html/lost+found
Oct 18 19:32:29.707: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-0 -- /bin/sh -c touch /usr/share/nginx/html/1508380346587629054'
Oct 18 19:32:30.171: INFO: stderr: ""
Oct 18 19:32:30.171: INFO: stdout of touch /usr/share/nginx/html/1508380346587629054 on web-0:
Oct 18 19:32:30.171: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-1 -- /bin/sh -c touch /usr/share/nginx/html/1508380346587629054'
Oct 18 19:32:30.653: INFO: stderr: ""
Oct 18 19:32:30.653: INFO: stdout of touch /usr/share/nginx/html/1508380346587629054 on web-1:
Oct 18 19:32:30.654: INFO: Running '/root/divyenp/kubernetes/_output/bin/kubectl --server=https://10.192.38.85 --kubeconfig=/root/.kube/config exec --namespace=e2e-tests-vsphere-statefulset-gnfmp web-2 -- /bin/sh -c touch /usr/share/nginx/html/1508380346587629054'
Oct 18 19:32:31.149: INFO: stderr: ""
Oct 18 19:32:31.150: INFO: stdout of touch /usr/share/nginx/html/1508380346587629054 on web-2:
STEP: Scaling down statefulsets to number of Replica: 2
Oct 18 19:32:31.263: INFO: Scaling statefulset web to 2
Oct 18 19:32:51.314: INFO: Waiting for statefulset status.replicas updated to 2
STEP: Verify Volumes are detached from Nodes after Statefulsets is scaled down
Oct 18 19:32:51.524: INFO: Waiting for Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-67b7e88c-b475-11e7-a38c-0050569c555f.vmdk" to detach from Node: "kubernetes-node2"
Oct 18 19:33:01.657: INFO: Volume "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-67b7e88c-b475-11e7-a38c-0050569c555f.vmdk" appears to have successfully detached from "kubernetes-node2".
STEP: Scaling up statefulsets to number of Replica: 3
Oct 18 19:33:01.657: INFO: Scaling statefulset web to 3
Oct 18 19:33:11.731: INFO: Waiting for statefulset status.replicas updated to 3
Oct 18 19:33:11.747: INFO: Waiting for statefulset status.replicas updated to 3
STEP: Verify all volumes are attached to Nodes after Statefulsets is scaled up
Oct 18 19:33:13.823: INFO: Verify Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-a6cf15ef-b474-11e7-a38c-0050569c555f.vmdk" is attached to the Node: "kubernetes-node4"
Oct 18 19:33:15.990: INFO: Verify Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-cfb65f92-b474-11e7-a38c-0050569c555f.vmdk" is attached to the Node: "kubernetes-node3"
Oct 18 19:33:18.154: INFO: Verify Volume: "[vsanDatastore] 1874c359-f300-a0cc-fd7e-02002a623c85/kubernetes-dynamic-pvc-67b7e88c-b475-11e7-a38c-0050569c555f.vmdk" is attached to the Node: "kubernetes-node2"
[AfterEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/framework/framework.go:134
Oct 18 19:33:18.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-tests-vsphere-statefulset-gnfmp" for this suite.
Oct 18 19:33:44.960: INFO: namespace: e2e-tests-vsphere-statefulset-gnfmp, resource: bindings, ignored listing per whitelist
Oct 18 19:33:44.960: INFO: namespace e2e-tests-vsphere-statefulset-gnfmp deletion completed in 26.620223678s
[AfterEach] [sig-storage] vsphere statefulset
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:67
Oct 18 19:33:44.960: INFO: Deleting all statefulset in namespace: e2e-tests-vsphere-statefulset-gnfmp

• [SLOW TEST:548.654 seconds]
[sig-storage] vsphere statefulset
/root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/framework.go:22
  vsphere statefulset testing
  /root/divyenp/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/vsphere_statefulsets.go:155
------------------------------
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
Oct 18 19:33:45.006: INFO: Running AfterSuite actions on all node
Oct 18 19:33:45.006: INFO: Running AfterSuite actions on node 1

Ran 1 of 713 Specs in 548.875 seconds
SUCCESS! -- 1 Passed | 0 Failed | 0 Pending | 712 Skipped
PASS

Ginkgo ran 1 suite in 9m9.728218415s
Test Suite Passed
2017/10/18 19:33:45 util.go:156: Step './hack/ginkgo-e2e.sh --ginkgo.focus=vsphere\sstatefulset\stesting' finished in 9m10.656371481s
2017/10/18 19:33:45 e2e.go:81: Done
```

VMware Reviewers: @rohitjogvmw @BaluDontu @tusharnt

**Release note**:

```release-note
NONE
```
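For reference, here is a condensed sketch of the test flow described above. This is illustrative rather than the literal test source; helper names such as `framework.NewStatefulSetTester` follow the e2e framework of this era, and `client`, `namespace` and `manifestPath` are assumed to come from the usual framework setup (see `test/e2e/storage/vsphere_statefulsets.go` for the real test):

```go
By("Creating StorageClass for Statefulset")
scSpec := getVSphereStorageClassSpec("thin-sc", map[string]string{"diskformat": "thin"})
sc, err := client.StorageV1().StorageClasses().Create(scSpec)
Expect(err).NotTo(HaveOccurred())
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)

By("Creating statefulset")
statefulsetTester := framework.NewStatefulSetTester(client)
statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace)
replicas := *(statefulset.Spec.Replicas)
statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas)

By("Scaling down statefulsets to number of Replica: 2")
_, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1)
Expect(scaledownErr).NotTo(HaveOccurred())
statefulsetTester.WaitForStatusReplicas(statefulset, replicas-1)
// After the scale-down, waitForVSphereDiskToDetach (from the utils below)
// confirms the orphaned VMDK is detached from its node before scaling back up.
```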
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"fmt"
	"path/filepath"
	"strconv"
	"time"

	. "github.com/onsi/gomega"
	"k8s.io/api/core/v1"
	storage "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	k8stype "k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
	"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
	"k8s.io/kubernetes/pkg/volume/util/volumehelper"
	"k8s.io/kubernetes/test/e2e/framework"
)

// verifyVSphereDiskAttached is a sanity check for vSphere testing: it verifies
// that the persistent disk is attached to the given node.
func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
	var (
		isAttached bool
		err        error
	)
	if vsp == nil {
		vsp, err = vsphere.GetVSphere()
		Expect(err).NotTo(HaveOccurred())
	}
	isAttached, err = vsp.DiskIsAttached(volumePath, nodeName)
	Expect(err).NotTo(HaveOccurred())
	return isAttached, err
}

// waitForVSphereDiskToDetach waits until the vSphere VMDK is detached from the
// given node, or times out after 5 minutes.
func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
	var (
		err            error
		diskAttached   = true
		detachTimeout  = 5 * time.Minute
		detachPollTime = 10 * time.Second
	)
	if vsp == nil {
		vsp, err = vsphere.GetVSphere()
		if err != nil {
			return err
		}
	}
	err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
		diskAttached, err = verifyVSphereDiskAttached(vsp, volumePath, nodeName)
		if err != nil {
			return true, err
		}
		if !diskAttached {
			framework.Logf("Volume %q appears to have successfully detached from %q.",
				volumePath, nodeName)
			return true, nil
		}
		framework.Logf("Waiting for Volume %q to detach from %q.", volumePath, nodeName)
		return false, nil
	})
	if err != nil {
		return err
	}
	if diskAttached {
		return fmt.Errorf("Gave up waiting for Volume %q to detach from %q after %v", volumePath, nodeName, detachTimeout)
	}
	return nil
}
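
// Example usage (illustrative sketch, not part of the original helpers): a
// test that has deleted the last pod using a claim can block until the backing
// VMDK is detached before reusing or deleting the disk. The namespace, claim
// name and node name below are placeholders.
func exampleWaitForDetachAfterPodDelete(client clientset.Interface, vsp *vsphere.VSphere) error {
	// Resolve the VMDK path behind the claim, then poll (10s interval, 5m
	// timeout) until DiskIsAttached reports false for the node.
	volumePath := getvSphereVolumePathFromClaim(client, "e2e-tests-vsphere", "pvc-example")
	return waitForVSphereDiskToDetach(vsp, volumePath, k8stype.NodeName("kubernetes-node2"))
}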

// getVSpherePersistentVolumeSpec creates a vSphere persistent volume spec with
// the given VMDK volume path, reclaim policy and labels.
func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
	var (
		pvConfig framework.PersistentVolumeConfig
		pv       *v1.PersistentVolume
		claimRef *v1.ObjectReference
	)
	pvConfig = framework.PersistentVolumeConfig{
		NamePrefix: "vspherepv-",
		PVSource: v1.PersistentVolumeSource{
			VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
				VolumePath: volumePath,
				FSType:     "ext4",
			},
		},
		Prebind: nil,
	}

	pv = &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: pvConfig.NamePrefix,
			Annotations: map[string]string{
				volumehelper.VolumeGidAnnotationKey: "777",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: persistentVolumeReclaimPolicy,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
			},
			PersistentVolumeSource: pvConfig.PVSource,
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			ClaimRef: claimRef,
		},
	}
	if labels != nil {
		pv.Labels = labels
	}
	return pv
}

// getVSpherePersistentVolumeClaimSpec builds a vSphere persistent volume claim
// spec with the given selector labels.
func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
	var (
		pvc *v1.PersistentVolumeClaim
	)
	pvc = &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    namespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
				},
			},
		},
	}
	if labels != nil {
		pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
	}

	return pvc
}

// createVSphereVolume creates a VMDK volume. When volumeOptions is nil it
// defaults to a 2GiB disk with a timestamped name.
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
	var (
		volumePath string
		err        error
	)
	if volumeOptions == nil {
		volumeOptions = new(vclib.VolumeOptions)
		volumeOptions.CapacityKB = 2097152
		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
	}
	volumePath, err = vsp.CreateVolume(volumeOptions)
	Expect(err).NotTo(HaveOccurred())
	return volumePath, nil
}
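
// Example usage (illustrative sketch, not part of the original helpers):
// statically provision a VMDK and build a PV/PVC pair that bind to each other
// through matching selector labels. The label value is a placeholder.
func exampleStaticProvisioning(vsp *vsphere.VSphere, ns string) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	labels := map[string]string{"volume-id": "vsphere-e2e-example"}
	// nil volumeOptions picks the defaults above: a 2GiB disk with a timestamped name.
	volumePath, err := createVSphereVolume(vsp, nil)
	Expect(err).NotTo(HaveOccurred())
	// Retain keeps the VMDK around after the claim is released, so the test
	// controls cleanup explicitly.
	pv := getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, labels)
	pvc := getVSpherePersistentVolumeClaimSpec(ns, labels)
	return pv, pvc
}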

// writeContentToVSpherePV writes the given content to the volume backed by the
// given PVC.
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
	runInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
	framework.Logf("Done with writing content to volume")
}

// verifyContentOfVSpherePV verifies that the content of the volume backed by
// the given PVC matches the expected content.
func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
	runInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
	framework.Logf("Successfully verified content of the volume")
}

// getVSphereStorageClassSpec builds a StorageClass spec for the vSphere volume
// provisioner with the given name and parameters.
func getVSphereStorageClassSpec(name string, scParameters map[string]string) *storage.StorageClass {
	var sc *storage.StorageClass

	sc = &storage.StorageClass{
		TypeMeta: metav1.TypeMeta{
			Kind: "StorageClass",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Provisioner: "kubernetes.io/vsphere-volume",
	}
	if scParameters != nil {
		sc.Parameters = scParameters
	}
	return sc
}

// getVSphereClaimSpecWithStorageClassAnnotation builds a PVC spec of the given
// size that requests dynamic provisioning from the given storage class via the
// beta storage class annotation.
func getVSphereClaimSpecWithStorageClassAnnotation(ns string, diskSize string, storageclass *storage.StorageClass) *v1.PersistentVolumeClaim {
	scAnnotation := make(map[string]string)
	scAnnotation[v1.BetaStorageClassAnnotation] = storageclass.Name

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
			Annotations:  scAnnotation,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
				},
			},
		},
	}
	return claim
}
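
// Example usage (illustrative sketch, not part of the original helpers):
// dynamic provisioning with the specs above. A StorageClass with the thin
// diskformat plus an annotated 2Gi claim is essentially what the statefulset
// test's manifests amount to. The class name is a placeholder.
func exampleDynamicProvisioning(client clientset.Interface, ns string) (*storage.StorageClass, *v1.PersistentVolumeClaim, error) {
	sc, err := client.StorageV1().StorageClasses().Create(
		getVSphereStorageClassSpec("thin-sc", map[string]string{"diskformat": "thin"}))
	if err != nil {
		return nil, nil, err
	}
	pvc, err := client.CoreV1().PersistentVolumeClaims(ns).Create(
		getVSphereClaimSpecWithStorageClassAnnotation(ns, "2Gi", sc))
	if err != nil {
		return sc, nil, err
	}
	return sc, pvc, nil
}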

// getVSpherePodSpecWithClaim builds a pod spec with the given volume claim,
// node selector labels and command.
func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pod-pvc-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   "busybox",
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}
	if nodeSelectorKV != nil {
		pod.Spec.NodeSelector = nodeSelectorKV
	}
	return pod
}

// getVSpherePodSpecWithVolumePaths builds a pod spec with the given volume
// paths, node selector labels and container commands.
func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
	var volumeMounts []v1.VolumeMount
	var volumes []v1.Volume

	for index, volumePath := range volumePaths {
		name := fmt.Sprintf("volume%v", index+1)
		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
		vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
		vsphereVolume.VolumePath = volumePath
		vsphereVolume.FSType = "ext4"
		volumes = append(volumes, v1.Volume{Name: name})
		volumes[index].VolumeSource.VsphereVolume = vsphereVolume
	}

	if len(commands) == 0 {
		commands = []string{
			"/bin/sh",
			"-c",
			"while true; do sleep 2; done",
		}
	}
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "vsphere-e2e-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:         "vsphere-e2e-container-" + string(uuid.NewUUID()),
					Image:        "busybox",
					Command:      commands,
					VolumeMounts: volumeMounts,
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes:       volumes,
		},
	}

	if keyValuelabel != nil {
		pod.Spec.NodeSelector = keyValuelabel
	}
	return pod
}

// verifyFilesExistOnVSphereVolume verifies that each of the given file paths
// exists in the pod by running ls on it.
func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths []string) {
	for _, filePath := range filePaths {
		_, err := framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", namespace), podName, "--", "/bin/ls", filePath)
		Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
	}
}

// createEmptyFilesOnVSphereVolume creates an empty file at each of the given
// paths in the pod.
func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
	for _, filePath := range filePaths {
		err := framework.CreateEmptyFileOnPod(namespace, podName, filePath)
		Expect(err).NotTo(HaveOccurred())
	}
}

// verifyVSphereVolumesAccessible verifies that the volumes are attached to the
// node and are accessible in the pod.
func verifyVSphereVolumesAccessible(pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) {
	nodeName := pod.Spec.NodeName
	namespace := pod.Namespace
	for index, pv := range persistentvolumes {
		// Verify that the disk is attached to the node.
		isAttached, err := verifyVSphereDiskAttached(vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
		Expect(err).NotTo(HaveOccurred())
		Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached to the node", pv.Spec.VsphereVolume.VolumePath))
		// Verify that the volume is accessible by touching a file on it.
		filePath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
		_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filePath}, "", time.Minute)
		Expect(err).NotTo(HaveOccurred())
	}
}
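
// Example usage (illustrative sketch, not part of the original helpers): mount
// the VMDKs behind a set of bound PVs in a pod, wait for it to run, then check
// that each disk is attached to the scheduled node and writable in the pod.
func exampleVerifyVolumesAccessible(client clientset.Interface, vsp *vsphere.VSphere, ns string, persistentvolumes []*v1.PersistentVolume) {
	var volumePaths []string
	for _, pv := range persistentvolumes {
		volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
	}
	// The pod mounts each VMDK at /mnt/volume1, /mnt/volume2, ... which is the
	// layout verifyVSphereVolumesAccessible expects.
	pod, err := client.CoreV1().Pods(ns).Create(getVSpherePodSpecWithVolumePaths(volumePaths, nil, nil))
	Expect(err).NotTo(HaveOccurred())
	Expect(framework.WaitForPodRunningInNamespace(client, pod)).To(Succeed())
	// Re-read the pod to learn which node it was scheduled to.
	pod, err = client.CoreV1().Pods(ns).Get(pod.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
}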

// getvSphereVolumePathFromClaim returns the vSphere volume path backing the
// given PVC.
func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string, claimName string) string {
	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(claimName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())
	return pv.Spec.VsphereVolume.VolumePath
}