Drop vsphere cloud provider
Signed-off-by: Davanum Srinivas <davanum@gmail.com>
LICENSES/vendor/github.com/vmware/govmomi/LICENSE (generated, vendored): 206 changed lines
							| @@ -1,206 +0,0 @@ | ||||
| = vendor/github.com/vmware/govmomi licensed under: = | ||||
|  | ||||
|  | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
|  | ||||
| = vendor/github.com/vmware/govmomi/LICENSE.txt 3b83ef96387f14655fc854ddc3c6bd57 | ||||
| @@ -1,10 +0,0 @@ | ||||
| apiVersion: storage.k8s.io/v1 | ||||
| kind: StorageClass | ||||
| metadata: | ||||
|   name: thin | ||||
|   annotations: | ||||
|     storageclass.kubernetes.io/is-default-class: "true" | ||||
|   labels: | ||||
| provisioner: kubernetes.io/vsphere-volume | ||||
| parameters: | ||||
|     diskformat: thin | ||||
| @@ -26,5 +26,4 @@ import ( | ||||
| 	// NOTE: Importing all in-tree cloud-providers is not required when | ||||
| 	// implementing an out-of-tree cloud-provider. | ||||
| 	_ "k8s.io/legacy-cloud-providers/gce" | ||||
| 	_ "k8s.io/legacy-cloud-providers/vsphere" | ||||
| ) | ||||
|   | ||||
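The registration hunk above (the "@@ -26,5 +26,4 @@" block) and the matching "@@ -22,5 +22,4 @@ package cloudprovider" hunk further down drop the vsphere blank import and leave GCE as the only remaining legacy in-tree provider. A minimal sketch of how such a registration file reads after this commit, assuming only the context visible in those hunks (the exact file paths are not part of this excerpt):

package cloudprovider

import (
	// NOTE: Importing all in-tree cloud-providers is not required when
	// implementing an out-of-tree cloud-provider.
	_ "k8s.io/legacy-cloud-providers/gce"
)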
							
								
								
									
go.mod: 3 changed lines
							| @@ -62,7 +62,6 @@ require ( | ||||
| 	github.com/spf13/pflag v1.0.5 | ||||
| 	github.com/stretchr/testify v1.8.4 | ||||
| 	github.com/vishvananda/netlink v1.1.0 | ||||
| 	github.com/vmware/govmomi v0.30.6 | ||||
| 	go.etcd.io/etcd/api/v3 v3.5.10 | ||||
| 	go.etcd.io/etcd/client/pkg/v3 v3.5.10 | ||||
| 	go.etcd.io/etcd/client/v3 v3.5.10 | ||||
| @@ -87,7 +86,6 @@ require ( | ||||
| 	google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d | ||||
| 	google.golang.org/grpc v1.58.3 | ||||
| 	google.golang.org/protobuf v1.31.0 | ||||
| 	gopkg.in/gcfg.v1 v1.2.3 | ||||
| 	gopkg.in/square/go-jose.v2 v2.6.0 | ||||
| 	gopkg.in/yaml.v2 v2.4.0 | ||||
| 	gopkg.in/yaml.v3 v3.0.1 | ||||
| @@ -225,6 +223,7 @@ require ( | ||||
| 	google.golang.org/appengine v1.6.7 // indirect | ||||
| 	google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5 // indirect | ||||
| 	google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect | ||||
| 	gopkg.in/gcfg.v1 v1.2.3 // indirect | ||||
| 	gopkg.in/inf.v0 v0.9.1 // indirect | ||||
| 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect | ||||
| 	gopkg.in/warnings.v0 v0.1.2 // indirect | ||||
|   | ||||
							
								
								
									
go.sum: 6 changed lines
							| @@ -184,7 +184,6 @@ github.com/Microsoft/hcsshim v0.8.25/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01 | ||||
| github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= | ||||
| github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= | ||||
| github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= | ||||
| github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= | ||||
| github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= | ||||
| github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= | ||||
| github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= | ||||
| @@ -304,7 +303,6 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh | ||||
| github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= | ||||
| github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= | ||||
| github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= | ||||
| github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= | ||||
| github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= | ||||
| github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= | ||||
| github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= | ||||
| @@ -673,7 +671,6 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 | ||||
| github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= | ||||
| github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= | ||||
| github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= | ||||
| github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= | ||||
| github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= | ||||
| github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= | ||||
| github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= | ||||
| @@ -746,9 +743,6 @@ github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYp | ||||
| github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= | ||||
| github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= | ||||
| github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= | ||||
| github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= | ||||
| github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= | ||||
| github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= | ||||
| github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= | ||||
| github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= | ||||
| github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= | ||||
|   | ||||
| @@ -22,5 +22,4 @@ package cloudprovider | ||||
| import ( | ||||
| 	// Cloud providers | ||||
| 	_ "k8s.io/legacy-cloud-providers/gce" | ||||
| 	_ "k8s.io/legacy-cloud-providers/vsphere" | ||||
| ) | ||||
|   | ||||
| @@ -1,26 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2019 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package cloudprovider | ||||
|  | ||||
| import ( | ||||
| 	// transitive test dependencies are not vendored by go modules | ||||
| 	// so we have to explicitly import them here | ||||
| 	_ "k8s.io/legacy-cloud-providers/vsphere/testing" | ||||
| ) | ||||
| @@ -41,7 +41,6 @@ var ( | ||||
| 		detail   string | ||||
| 	}{ | ||||
| 		{"gce", false, "The GCE provider is deprecated and will be removed in a future release. Please use https://github.com/kubernetes/cloud-provider-gcp"}, | ||||
| 		{"vsphere", false, "The vSphere provider is deprecated and will be removed in a future release. Please use https://github.com/kubernetes/cloud-provider-vsphere"}, | ||||
| 	} | ||||
| ) | ||||
|  | ||||
|   | ||||
| @@ -9,7 +9,6 @@ require ( | ||||
| 	github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b | ||||
| 	github.com/google/go-cmp v0.6.0 | ||||
| 	github.com/stretchr/testify v1.8.4 | ||||
| 	github.com/vmware/govmomi v0.30.6 | ||||
| 	golang.org/x/oauth2 v0.10.0 | ||||
| 	google.golang.org/api v0.126.0 | ||||
| 	gopkg.in/gcfg.v1 v1.2.3 | ||||
| @@ -72,7 +71,6 @@ require ( | ||||
| 	gopkg.in/warnings.v0 v0.1.2 // indirect | ||||
| 	gopkg.in/yaml.v2 v2.4.0 // indirect | ||||
| 	gopkg.in/yaml.v3 v3.0.1 // indirect | ||||
| 	k8s.io/component-helpers v0.0.0 // indirect | ||||
| 	k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e // indirect | ||||
| 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect | ||||
| 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect | ||||
|   | ||||
							
								
								
									
staging/src/k8s.io/legacy-cloud-providers/go.sum (generated): 6 changed lines
							| @@ -55,7 +55,6 @@ github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f1181 | ||||
| github.com/GoogleCloudPlatform/k8s-cloud-provider v1.18.1-0.20220218231025-f11817397a1b/go.mod h1:FNj4KYEAAHfYu68kRYolGoxkaJn+6mdEsaM12VTwuI0= | ||||
| github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= | ||||
| github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= | ||||
| github.com/a8m/tree v0.0.0-20210115125333-10a5fd5b637d/go.mod h1:FSdwKX97koS5efgm8WevNf7XS3PqtyFkKDDXrz778cg= | ||||
| github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= | ||||
| github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= | ||||
| github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= | ||||
| @@ -88,7 +87,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 | ||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= | ||||
| github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/dougm/pretty v0.0.0-20171025230240-2ee9d7453c02/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= | ||||
| github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= | ||||
| github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= | ||||
| github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= | ||||
| @@ -281,7 +279,6 @@ github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdO | ||||
| github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= | ||||
| github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= | ||||
| github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= | ||||
| github.com/rasky/go-xdr v0.0.0-20170217172119-4930550ba2e2/go.mod h1:Nfe4efndBz4TibWycNE+lqyJZiMX4ycx+QKV8Ta0f/o= | ||||
| github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= | ||||
| github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= | ||||
| github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= | ||||
| @@ -303,9 +300,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO | ||||
| github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= | ||||
| github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= | ||||
| github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= | ||||
| github.com/vmware/govmomi v0.30.6 h1:O3tjSwQBy0XwI5uK1/yVIfQ1LP9bAECEDUfifnyGs9U= | ||||
| github.com/vmware/govmomi v0.30.6/go.mod h1:epgoslm97rLECMV4D+08ORzUBEU7boFSepKjt7AYVGg= | ||||
| github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= | ||||
| github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= | ||||
| github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
| github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
|   | ||||
| @@ -1,12 +0,0 @@ | ||||
| # See the OWNERS docs at https://go.k8s.io/owners | ||||
|  | ||||
| emeritus_approvers: | ||||
|   - baludontu | ||||
|   - divyenpatel | ||||
|   - frapposelli | ||||
|   - dougm | ||||
|   - SandeepPissay | ||||
|   - imkin | ||||
|   - abrarshivani | ||||
| reviewers: | ||||
|   - divyenpatel | ||||
| @@ -1,170 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"net/http" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	corev1 "k8s.io/api/core/v1" | ||||
| 	apierrors "k8s.io/apimachinery/pkg/api/errors" | ||||
| 	v1 "k8s.io/client-go/listers/core/v1" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // Error Messages | ||||
| const ( | ||||
| 	CredentialsNotFoundErrMsg = "Credentials not found" | ||||
| 	CredentialMissingErrMsg   = "Username/Password is missing" | ||||
| 	UnknownSecretKeyErrMsg    = "Unknown secret key" | ||||
| ) | ||||
|  | ||||
| // Error constants | ||||
| var ( | ||||
| 	ErrCredentialsNotFound = errors.New(CredentialsNotFoundErrMsg) | ||||
| 	ErrCredentialMissing   = errors.New(CredentialMissingErrMsg) | ||||
| 	ErrUnknownSecretKey    = errors.New(UnknownSecretKeyErrMsg) | ||||
| ) | ||||
|  | ||||
| type SecretCache struct { | ||||
| 	cacheLock     sync.Mutex | ||||
| 	VirtualCenter map[string]*Credential | ||||
| 	Secret        *corev1.Secret | ||||
| } | ||||
|  | ||||
| type Credential struct { | ||||
| 	User     string `gcfg:"user"` | ||||
| 	Password string `gcfg:"password" datapolicy:"password"` | ||||
| } | ||||
|  | ||||
| type SecretCredentialManager struct { | ||||
| 	SecretName      string | ||||
| 	SecretNamespace string | ||||
| 	SecretLister    v1.SecretLister | ||||
| 	Cache           *SecretCache | ||||
| } | ||||
|  | ||||
| // GetCredential returns credentials for the given vCenter Server. | ||||
| // GetCredential returns an error if the Secret has not been added. | ||||
| // GetCredential returns an error if the secret doesn't contain any credentials. | ||||
| func (secretCredentialManager *SecretCredentialManager) GetCredential(server string) (*Credential, error) { | ||||
| 	err := secretCredentialManager.updateCredentialsMap() | ||||
| 	if err != nil { | ||||
| 		statusErr, ok := err.(*apierrors.StatusError) | ||||
| 		if (ok && statusErr.ErrStatus.Code != http.StatusNotFound) || !ok { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		// Handle secrets deletion by finding credentials from cache | ||||
| 		klog.Warningf("secret %q not found in namespace %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace) | ||||
| 	} | ||||
|  | ||||
| 	// Converting server FQIN to lowercase to consolidate with config parsing approach | ||||
| 	server = strings.ToLower(server) | ||||
| 	credential, found := secretCredentialManager.Cache.GetCredential(server) | ||||
| 	if !found { | ||||
| 		klog.Errorf("credentials not found for server %q", server) | ||||
| 		return nil, ErrCredentialsNotFound | ||||
| 	} | ||||
| 	return &credential, nil | ||||
| } | ||||
|  | ||||
| func (secretCredentialManager *SecretCredentialManager) updateCredentialsMap() error { | ||||
| 	if secretCredentialManager.SecretLister == nil { | ||||
| 		return fmt.Errorf("secretLister is not initialized") | ||||
| 	} | ||||
| 	secret, err := secretCredentialManager.SecretLister.Secrets(secretCredentialManager.SecretNamespace).Get(secretCredentialManager.SecretName) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Cannot get secret %s in namespace %s. error: %q", secretCredentialManager.SecretName, secretCredentialManager.SecretNamespace, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	cacheSecret := secretCredentialManager.Cache.GetSecret() | ||||
| 	if cacheSecret != nil && | ||||
| 		cacheSecret.GetResourceVersion() == secret.GetResourceVersion() { | ||||
| 		klog.V(4).Infof("VCP SecretCredentialManager: secret %q will not be updated in cache since its resource version %q is unchanged", secretCredentialManager.SecretName, cacheSecret.GetResourceVersion()) | ||||
| 		return nil | ||||
| 	} | ||||
| 	secretCredentialManager.Cache.UpdateSecret(secret) | ||||
| 	return secretCredentialManager.Cache.parseSecret() | ||||
| } | ||||
|  | ||||
| func (cache *SecretCache) GetSecret() *corev1.Secret { | ||||
| 	cache.cacheLock.Lock() | ||||
| 	defer cache.cacheLock.Unlock() | ||||
| 	return cache.Secret | ||||
| } | ||||
|  | ||||
| func (cache *SecretCache) UpdateSecret(secret *corev1.Secret) { | ||||
| 	cache.cacheLock.Lock() | ||||
| 	defer cache.cacheLock.Unlock() | ||||
| 	cache.Secret = secret | ||||
| } | ||||
|  | ||||
| func (cache *SecretCache) GetCredential(server string) (Credential, bool) { | ||||
| 	cache.cacheLock.Lock() | ||||
| 	defer cache.cacheLock.Unlock() | ||||
| 	credential, found := cache.VirtualCenter[server] | ||||
| 	if !found { | ||||
| 		return Credential{}, found | ||||
| 	} | ||||
| 	return *credential, found | ||||
| } | ||||
|  | ||||
| func (cache *SecretCache) parseSecret() error { | ||||
| 	cache.cacheLock.Lock() | ||||
| 	defer cache.cacheLock.Unlock() | ||||
| 	return parseConfig(cache.Secret.Data, cache.VirtualCenter) | ||||
| } | ||||
|  | ||||
| // parseConfig returns the vCenter IP/FQDN mapping to its credentials, viz. username and password. | ||||
| func parseConfig(data map[string][]byte, config map[string]*Credential) error { | ||||
| 	if len(data) == 0 { | ||||
| 		return ErrCredentialMissing | ||||
| 	} | ||||
| 	for credentialKey, credentialValue := range data { | ||||
| 		credentialKey = strings.ToLower(credentialKey) | ||||
| 		vcServer := "" | ||||
| 		if strings.HasSuffix(credentialKey, "password") { | ||||
| 			vcServer = strings.Split(credentialKey, ".password")[0] | ||||
| 			if _, ok := config[vcServer]; !ok { | ||||
| 				config[vcServer] = &Credential{} | ||||
| 			} | ||||
| 			config[vcServer].Password = string(credentialValue) | ||||
| 		} else if strings.HasSuffix(credentialKey, "username") { | ||||
| 			vcServer = strings.Split(credentialKey, ".username")[0] | ||||
| 			if _, ok := config[vcServer]; !ok { | ||||
| 				config[vcServer] = &Credential{} | ||||
| 			} | ||||
| 			config[vcServer].User = string(credentialValue) | ||||
| 		} else { | ||||
| 			klog.Errorf("Unknown secret key %s", credentialKey) | ||||
| 			return ErrUnknownSecretKey | ||||
| 		} | ||||
| 	} | ||||
| 	for vcServer, credential := range config { | ||||
| 		if credential.User == "" || credential.Password == "" { | ||||
| 			klog.Errorf("Username/Password is missing for server %s", vcServer) | ||||
| 			return ErrCredentialMissing | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
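The removed credential manager above reads vCenter credentials from a Kubernetes Secret whose data keys follow the convention "<vcenter-server>.username" and "<vcenter-server>.password", lowercases the key, and requires both fields per server. Below is a minimal, self-contained sketch of that key convention, written as a standalone re-implementation for illustration only: the server name and values are made up, and the code mirrors the removed parseConfig logic rather than calling it.

package main

import (
	"fmt"
	"strings"
)

// credential mirrors the removed vsphere.Credential type.
type credential struct {
	User     string
	Password string
}

// parse mirrors the removed parseConfig: keys are "<server>.username" or
// "<server>.password"; any other key is an error, and every server must end
// up with both fields populated.
func parse(data map[string][]byte) (map[string]*credential, error) {
	out := map[string]*credential{}
	for k, v := range data {
		k = strings.ToLower(k)
		switch {
		case strings.HasSuffix(k, ".password"):
			s := strings.TrimSuffix(k, ".password")
			if out[s] == nil {
				out[s] = &credential{}
			}
			out[s].Password = string(v)
		case strings.HasSuffix(k, ".username"):
			s := strings.TrimSuffix(k, ".username")
			if out[s] == nil {
				out[s] = &credential{}
			}
			out[s].User = string(v)
		default:
			return nil, fmt.Errorf("unknown secret key %q", k)
		}
	}
	for s, c := range out {
		if c.User == "" || c.Password == "" {
			return nil, fmt.Errorf("username/password missing for %q", s)
		}
	}
	return out, nil
}

func main() {
	// Example Secret data in the shape the removed SecretCredentialManager consumed
	// (hypothetical server name and credentials, for illustration only).
	data := map[string][]byte{
		"vc1.example.com.username": []byte("admin"),
		"vc1.example.com.password": []byte("s3cret"),
	}
	creds, err := parse(data)
	if err != nil {
		panic(err)
	}
	fmt.Println(creds["vc1.example.com"].User) // prints: admin
}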
| @@ -1,383 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	corev1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/labels" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| ) | ||||
|  | ||||
| func TestSecretCredentialManager_GetCredential(t *testing.T) { | ||||
| 	var ( | ||||
| 		userKey             = "username" | ||||
| 		passwordKey         = "password" | ||||
| 		testUser            = "user" | ||||
| 		testPassword        = "password" | ||||
| 		testServer          = "0.0.0.0" | ||||
| 		testServer2         = "0.0.1.1" | ||||
| 		testServerFQIN      = "ExAmple.com" | ||||
| 		testUserServer2     = "user1" | ||||
| 		testPasswordServer2 = "password1" | ||||
| 		testIncorrectServer = "1.1.1.1" | ||||
| 	) | ||||
| 	var ( | ||||
| 		secretName      = "vsconf" | ||||
| 		secretNamespace = "kube-system" | ||||
| 	) | ||||
| 	var ( | ||||
| 		addSecretOp      = "ADD_SECRET_OP" | ||||
| 		getCredentialsOp = "GET_CREDENTIAL_OP" | ||||
| 		deleteSecretOp   = "DELETE_SECRET_OP" | ||||
| 	) | ||||
| 	type GetCredentialsTest struct { | ||||
| 		server   string | ||||
| 		username string | ||||
| 		password string | ||||
| 		err      error | ||||
| 	} | ||||
| 	type OpSecretTest struct { | ||||
| 		secret *corev1.Secret | ||||
| 	} | ||||
| 	type testEnv struct { | ||||
| 		testName       string | ||||
| 		ops            []string | ||||
| 		expectedValues []interface{} | ||||
| 	} | ||||
|  | ||||
| 	client := &fake.Clientset{} | ||||
| 	metaObj := metav1.ObjectMeta{ | ||||
| 		Name:      secretName, | ||||
| 		Namespace: secretNamespace, | ||||
| 	} | ||||
|  | ||||
| 	defaultSecret := &corev1.Secret{ | ||||
| 		ObjectMeta: metaObj, | ||||
| 		Data: map[string][]byte{ | ||||
| 			testServer + "." + userKey:     []byte(testUser), | ||||
| 			testServer + "." + passwordKey: []byte(testPassword), | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	multiVCSecret := &corev1.Secret{ | ||||
| 		ObjectMeta: metaObj, | ||||
| 		Data: map[string][]byte{ | ||||
| 			testServer + "." + userKey:      []byte(testUser), | ||||
| 			testServer + "." + passwordKey:  []byte(testPassword), | ||||
| 			testServer2 + "." + userKey:     []byte(testUserServer2), | ||||
| 			testServer2 + "." + passwordKey: []byte(testPasswordServer2), | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	fqinSecret := &corev1.Secret{ | ||||
| 		ObjectMeta: metaObj, | ||||
| 		Data: map[string][]byte{ | ||||
| 			testServerFQIN + "." + userKey:     []byte(testUser), | ||||
| 			testServerFQIN + "." + passwordKey: []byte(testPassword), | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	emptySecret := &corev1.Secret{ | ||||
| 		ObjectMeta: metaObj, | ||||
| 		Data:       map[string][]byte{}, | ||||
| 	} | ||||
|  | ||||
| 	tests := []testEnv{ | ||||
| 		{ | ||||
| 			testName: "Deleting secret should give the credentials from cache", | ||||
| 			ops:      []string{addSecretOp, getCredentialsOp, deleteSecretOp, getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				OpSecretTest{ | ||||
| 					secret: defaultSecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					username: testUser, | ||||
| 					password: testPassword, | ||||
| 					server:   testServer, | ||||
| 				}, | ||||
| 				OpSecretTest{ | ||||
| 					secret: defaultSecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					username: testUser, | ||||
| 					password: testPassword, | ||||
| 					server:   testServer, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "Add secret and get credentials", | ||||
| 			ops:      []string{addSecretOp, getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				OpSecretTest{ | ||||
| 					secret: defaultSecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					username: testUser, | ||||
| 					password: testPassword, | ||||
| 					server:   testServer, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "GetCredential should fail when the secret has not been added first", | ||||
| 			ops:      []string{getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				GetCredentialsTest{ | ||||
| 					username: testUser, | ||||
| 					password: testPassword, | ||||
| 					server:   testServer, | ||||
| 					err:      ErrCredentialsNotFound, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "GetCredential should fail to get credentials from empty secrets", | ||||
| 			ops:      []string{addSecretOp, getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				OpSecretTest{ | ||||
| 					secret: emptySecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					server: testServer, | ||||
| 					err:    ErrCredentialMissing, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "GetCredential should fail to get credentials for invalid server", | ||||
| 			ops:      []string{addSecretOp, getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				OpSecretTest{ | ||||
| 					secret: defaultSecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					server: testIncorrectServer, | ||||
| 					err:    ErrCredentialsNotFound, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "GetCredential for multi-vc", | ||||
| 			ops:      []string{addSecretOp, getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				OpSecretTest{ | ||||
| 					secret: multiVCSecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					server:   testServer2, | ||||
| 					username: testUserServer2, | ||||
| 					password: testPasswordServer2, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "GetCredential for FQIN server name", | ||||
| 			ops:      []string{addSecretOp, getCredentialsOp}, | ||||
| 			expectedValues: []interface{}{ | ||||
| 				OpSecretTest{ | ||||
| 					fqinSecret, | ||||
| 				}, | ||||
| 				GetCredentialsTest{ | ||||
| 					username: testUser, | ||||
| 					password: testPassword, | ||||
| 					server:   testServerFQIN, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	// TODO: replace 0 with NoResyncPeriodFunc() once it moved out pkg/controller/controller_utils.go in k/k. | ||||
| 	informerFactory := informers.NewSharedInformerFactory(client, 0) | ||||
| 	secretInformer := informerFactory.Core().V1().Secrets() | ||||
| 	secretCredentialManager := &SecretCredentialManager{ | ||||
| 		SecretName:      secretName, | ||||
| 		SecretNamespace: secretNamespace, | ||||
| 		SecretLister:    secretInformer.Lister(), | ||||
| 		Cache: &SecretCache{ | ||||
| 			VirtualCenter: make(map[string]*Credential), | ||||
| 		}, | ||||
| 	} | ||||
| 	cleanupSecretCredentialManager := func() { | ||||
| 		secretCredentialManager.Cache.Secret = nil | ||||
| 		for key := range secretCredentialManager.Cache.VirtualCenter { | ||||
| 			delete(secretCredentialManager.Cache.VirtualCenter, key) | ||||
| 		} | ||||
| 		secrets, err := secretCredentialManager.SecretLister.List(labels.Everything()) | ||||
| 		if err != nil { | ||||
| 			t.Fatal("Failed to get all secrets from sharedInformer. error: ", err) | ||||
| 		} | ||||
| 		for _, secret := range secrets { | ||||
| 			err := secretInformer.Informer().GetIndexer().Delete(secret) | ||||
| 			if err != nil { | ||||
| 				t.Fatalf("Failed to delete secret from informer: %v", err) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, test := range tests { | ||||
| 		t.Logf("Executing Testcase: %s", test.testName) | ||||
| 		for ntest, op := range test.ops { | ||||
| 			switch op { | ||||
| 			case addSecretOp: | ||||
| 				expected := test.expectedValues[ntest].(OpSecretTest) | ||||
| 				t.Logf("Adding secret: %s", expected.secret) | ||||
| 				err := secretInformer.Informer().GetIndexer().Add(expected.secret) | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Failed to add secret to internal cache: %v", err) | ||||
| 				} | ||||
| 			case getCredentialsOp: | ||||
| 				expected := test.expectedValues[ntest].(GetCredentialsTest) | ||||
| 				credential, err := secretCredentialManager.GetCredential(expected.server) | ||||
| 				t.Logf("Retrieving credentials for server %s", expected.server) | ||||
| 				if err != expected.err { | ||||
| 					t.Fatalf("Fail to get credentials with error: %v", err) | ||||
| 				} | ||||
| 				if expected.err == nil { | ||||
| 					if expected.username != credential.User || | ||||
| 						expected.password != credential.Password { | ||||
| 						t.Fatalf("Received credentials %v "+ | ||||
| 							"are different than actual credential user:%s password:%s", credential, expected.username, | ||||
| 							expected.password) | ||||
| 					} | ||||
| 				} | ||||
| 			case deleteSecretOp: | ||||
| 				expected := test.expectedValues[ntest].(OpSecretTest) | ||||
| 				t.Logf("Deleting secret: %s", expected.secret) | ||||
| 				err := secretInformer.Informer().GetIndexer().Delete(expected.secret) | ||||
| 				if err != nil { | ||||
| 					t.Fatalf("Failed to delete secret from internal cache: %v", err) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		cleanupSecretCredentialManager() | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestParseSecretConfig(t *testing.T) { | ||||
| 	var ( | ||||
| 		testUsername   = "Admin" | ||||
| 		testPassword   = "Password" | ||||
| 		testIP         = "10.20.30.40" | ||||
| 		testServerFQIN = "ExAmple.com" | ||||
| 	) | ||||
| 	var testcases = []struct { | ||||
| 		testName      string | ||||
| 		data          map[string][]byte | ||||
| 		config        map[string]*Credential | ||||
| 		expectedError error | ||||
| 	}{ | ||||
| 		{ | ||||
| 			testName: "Valid username and password", | ||||
| 			data: map[string][]byte{ | ||||
| 				"10.20.30.40.username": []byte(testUsername), | ||||
| 				"10.20.30.40.password": []byte(testPassword), | ||||
| 			}, | ||||
| 			config: map[string]*Credential{ | ||||
| 				testIP: { | ||||
| 					User:     testUsername, | ||||
| 					Password: testPassword, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedError: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "Invalid username key with valid password key", | ||||
| 			data: map[string][]byte{ | ||||
| 				"10.20.30.40.usernam":  []byte(testUsername), | ||||
| 				"10.20.30.40.password": []byte(testPassword), | ||||
| 			}, | ||||
| 			config:        nil, | ||||
| 			expectedError: ErrUnknownSecretKey, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "Missing username", | ||||
| 			data: map[string][]byte{ | ||||
| 				"10.20.30.40.password": []byte(testPassword), | ||||
| 			}, | ||||
| 			config: map[string]*Credential{ | ||||
| 				testIP: { | ||||
| 					Password: testPassword, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedError: ErrCredentialMissing, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "Missing password", | ||||
| 			data: map[string][]byte{ | ||||
| 				"10.20.30.40.username": []byte(testUsername), | ||||
| 			}, | ||||
| 			config: map[string]*Credential{ | ||||
| 				testIP: { | ||||
| 					User: testUsername, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedError: ErrCredentialMissing, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "FQIN stored as lowercase", | ||||
| 			data: map[string][]byte{ | ||||
| 				testServerFQIN + ".username": []byte(testUsername), | ||||
| 				testServerFQIN + ".password": []byte(testPassword), | ||||
| 			}, | ||||
| 			config: map[string]*Credential{ | ||||
| 				strings.ToLower(testServerFQIN): { | ||||
| 					User:     testUsername, | ||||
| 					Password: testPassword, | ||||
| 				}, | ||||
| 			}, | ||||
| 			expectedError: nil, | ||||
| 		}, | ||||
| 		{ | ||||
| 			testName: "IP with unknown key", | ||||
| 			data: map[string][]byte{ | ||||
| 				"10.20.30.40": []byte(testUsername), | ||||
| 			}, | ||||
| 			config:        nil, | ||||
| 			expectedError: ErrUnknownSecretKey, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	resultConfig := make(map[string]*Credential) | ||||
| 	cleanupResultConfig := func(config map[string]*Credential) { | ||||
| 		for k := range config { | ||||
| 			delete(config, k) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, testcase := range testcases { | ||||
| 		err := parseConfig(testcase.data, resultConfig) | ||||
| 		t.Logf("Executing Testcase: %s", testcase.testName) | ||||
| 		if err != testcase.expectedError { | ||||
| 			t.Fatalf("Parsing Secret failed for data %+v: %s", testcase.data, err) | ||||
| 		} | ||||
| 		if testcase.config != nil && !reflect.DeepEqual(testcase.config, resultConfig) { | ||||
| 			t.Fatalf("Parsing Secret failed for data %+v expected config %+v and actual config %+v", | ||||
| 				testcase.data, resultConfig, testcase.config) | ||||
| 		} | ||||
| 		cleanupResultConfig(resultConfig) | ||||
| 	} | ||||
| } | ||||
| @@ -1,17 +0,0 @@ | ||||
| /* | ||||
| Copyright 2019 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
| @@ -1,564 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/apimachinery/pkg/api/errors" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	k8stypes "k8s.io/apimachinery/pkg/types" | ||||
| 	coreclients "k8s.io/client-go/kubernetes/typed/core/v1" | ||||
| 	corelisters "k8s.io/client-go/listers/core/v1" | ||||
| 	cloudprovider "k8s.io/cloud-provider" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| // NodeInfo stores info about the Kubernetes node. | ||||
| type NodeInfo struct { | ||||
| 	dataCenter *vclib.Datacenter | ||||
| 	vm         *vclib.VirtualMachine | ||||
| 	vcServer   string | ||||
| 	vmUUID     string | ||||
| 	zone       *cloudprovider.Zone | ||||
| } | ||||
|  | ||||
| func (n NodeInfo) String() string { | ||||
| 	return fmt.Sprintf("{datacenter: %v, vm: %v, vcServer: %s, vmUUID: %s, zone: %v}", | ||||
| 		*n.dataCenter, n.vm.Reference(), n.vcServer, n.vmUUID, *n.zone) | ||||
| } | ||||
|  | ||||
| type NodeManager struct { | ||||
| 	// TODO: replace map with concurrent map when k8s supports go v1.9 | ||||
|  | ||||
| 	// Maps the VC server to VSphereInstance | ||||
| 	vsphereInstanceMap map[string]*VSphereInstance | ||||
| 	// Maps node name to node info. | ||||
| 	nodeInfoMap map[string]*NodeInfo | ||||
| 	// Maps node name to node structure | ||||
| 	registeredNodes map[string]*v1.Node | ||||
| 	//CredentialsManager | ||||
| 	credentialManager *SecretCredentialManager | ||||
|  | ||||
| 	nodeLister corelisters.NodeLister | ||||
| 	nodeGetter coreclients.NodesGetter | ||||
|  | ||||
| 	// Mutexes | ||||
| 	registeredNodesLock   sync.RWMutex | ||||
| 	nodeInfoLock          sync.RWMutex | ||||
| 	credentialManagerLock sync.Mutex | ||||
| } | ||||
|  | ||||
| type NodeDetails struct { | ||||
| 	NodeName string | ||||
| 	vm       *vclib.VirtualMachine | ||||
| 	VMUUID   string | ||||
| 	Zone     *cloudprovider.Zone | ||||
| } | ||||
|  | ||||
| // TODO: Make it configurable in vsphere.conf | ||||
| const ( | ||||
| 	POOL_SIZE  = 8 | ||||
| 	QUEUE_SIZE = POOL_SIZE * 10 | ||||
| ) | ||||
|  | ||||
| func (nm *NodeManager) DiscoverNode(node *v1.Node) error { | ||||
| 	type VmSearch struct { | ||||
| 		vc         string | ||||
| 		datacenter *vclib.Datacenter | ||||
| 	} | ||||
|  | ||||
| 	var mutex = &sync.Mutex{} | ||||
| 	var globalErrMutex = &sync.Mutex{} | ||||
| 	var queueChannel chan *VmSearch | ||||
| 	var wg sync.WaitGroup | ||||
| 	var globalErr *error | ||||
|  | ||||
| 	queueChannel = make(chan *VmSearch, QUEUE_SIZE) | ||||
| 	nodeUUID, err := GetNodeUUID(node) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	klog.V(4).Infof("Discovering node %s with uuid %s", node.ObjectMeta.Name, nodeUUID) | ||||
|  | ||||
| 	vmFound := false | ||||
| 	globalErr = nil | ||||
|  | ||||
| 	setGlobalErr := func(err error) { | ||||
| 		globalErrMutex.Lock() | ||||
| 		globalErr = &err | ||||
| 		globalErrMutex.Unlock() | ||||
| 	} | ||||
|  | ||||
| 	setVMFound := func(found bool) { | ||||
| 		mutex.Lock() | ||||
| 		vmFound = found | ||||
| 		mutex.Unlock() | ||||
| 	} | ||||
|  | ||||
| 	getVMFound := func() bool { | ||||
| 		mutex.Lock() | ||||
| 		found := vmFound | ||||
| 		mutex.Unlock() | ||||
| 		return found | ||||
| 	} | ||||
|  | ||||
| 	go func() { | ||||
| 		var datacenterObjs []*vclib.Datacenter | ||||
| 		for vc, vsi := range nm.vsphereInstanceMap { | ||||
|  | ||||
| 			found := getVMFound() | ||||
| 			if found { | ||||
| 				break | ||||
| 			} | ||||
|  | ||||
| 			// Create context | ||||
| 			ctx, cancel := context.WithCancel(context.Background()) | ||||
| 			defer cancel() | ||||
|  | ||||
| 			err := nm.vcConnect(ctx, vsi) | ||||
| 			if err != nil { | ||||
| 				klog.V(4).Info("Discovering node error vc:", err) | ||||
| 				setGlobalErr(err) | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			if vsi.cfg.Datacenters == "" { | ||||
| 				datacenterObjs, err = vclib.GetAllDatacenter(ctx, vsi.conn) | ||||
| 				if err != nil { | ||||
| 					klog.V(4).Info("Discovering node error dc:", err) | ||||
| 					setGlobalErr(err) | ||||
| 					continue | ||||
| 				} | ||||
| 			} else { | ||||
| 				datacenters := strings.Split(vsi.cfg.Datacenters, ",") | ||||
| 				for _, dc := range datacenters { | ||||
| 					dc = strings.TrimSpace(dc) | ||||
| 					if dc == "" { | ||||
| 						continue | ||||
| 					} | ||||
| 					datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc) | ||||
| 					if err != nil { | ||||
| 						klog.V(4).Info("Discovering node error dc:", err) | ||||
| 						setGlobalErr(err) | ||||
| 						continue | ||||
| 					} | ||||
| 					datacenterObjs = append(datacenterObjs, datacenterObj) | ||||
| 				} | ||||
| 			} | ||||
|  | ||||
| 			for _, datacenterObj := range datacenterObjs { | ||||
| 				found := getVMFound() | ||||
| 				if found { | ||||
| 					break | ||||
| 				} | ||||
|  | ||||
| 				klog.V(4).Infof("Finding node %s in vc=%s and datacenter=%s", node.Name, vc, datacenterObj.Name()) | ||||
| 				queueChannel <- &VmSearch{ | ||||
| 					vc:         vc, | ||||
| 					datacenter: datacenterObj, | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		close(queueChannel) | ||||
| 	}() | ||||
|  | ||||
| 	for i := 0; i < POOL_SIZE; i++ { | ||||
| 		wg.Add(1) | ||||
| 		go func() { | ||||
| 			for res := range queueChannel { | ||||
| 				ctx, cancel := context.WithCancel(context.Background()) | ||||
| 				vm, err := res.datacenter.GetVMByUUID(ctx, nodeUUID) | ||||
| 				if err != nil { | ||||
| 					klog.V(4).Infof("Error while looking for vm=%+v in vc=%s and datacenter=%s: %v", | ||||
| 						vm, res.vc, res.datacenter.Name(), err) | ||||
| 					if err != vclib.ErrNoVMFound { | ||||
| 						setGlobalErr(err) | ||||
| 					} else { | ||||
| 						klog.V(4).Infof("Did not find node %s in vc=%s and datacenter=%s", | ||||
| 							node.Name, res.vc, res.datacenter.Name()) | ||||
| 					} | ||||
| 					cancel() | ||||
| 					continue | ||||
| 				} | ||||
| 				if vm != nil { | ||||
| 					klog.V(4).Infof("Found node %s as vm=%+v in vc=%s and datacenter=%s", | ||||
| 						node.Name, vm, res.vc, res.datacenter.Name()) | ||||
| 					var vmObj mo.VirtualMachine | ||||
| 					err := vm.Properties(ctx, vm.Reference(), []string{"config"}, &vmObj) | ||||
| 					if err != nil || vmObj.Config == nil { | ||||
| 						klog.Errorf("failed to retrieve guest vmconfig for node: %s Err: %v", node.Name, err) | ||||
| 					} else { | ||||
| 						klog.V(4).Infof("vm hardware version for node:%s is %s", node.Name, vmObj.Config.Version) | ||||
| 						// vmconfig.Version returns vm hardware version as vmx-11, vmx-13, vmx-14, vmx-15 etc. | ||||
| 						vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated(vmObj.Config.Version) | ||||
| 						if err != nil { | ||||
| 							klog.Errorf("failed to check if vm hardware version is deprecated. VM Hardware Version: %s Err: %v", vmObj.Config.Version, err) | ||||
| 						} | ||||
| 						if vmhardwaredeprecated { | ||||
| 							klog.Warningf("VM Hardware version: %s from node: %s is deprecated. Please consider upgrading virtual machine hardware version to vmx-15 or higher", vmObj.Config.Version, node.Name) | ||||
| 						} | ||||
| 					} | ||||
| 					// Get the node zone information | ||||
| 					nodeFd := node.ObjectMeta.Labels[v1.LabelTopologyZone] | ||||
| 					nodeRegion := node.ObjectMeta.Labels[v1.LabelTopologyRegion] | ||||
| 					nodeZone := &cloudprovider.Zone{FailureDomain: nodeFd, Region: nodeRegion} | ||||
| 					nodeInfo := &NodeInfo{dataCenter: res.datacenter, vm: vm, vcServer: res.vc, vmUUID: nodeUUID, zone: nodeZone} | ||||
| 					nm.addNodeInfo(node.ObjectMeta.Name, nodeInfo) | ||||
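| 					// A VM for this node has been found: drain any searches still | ||||
| 					// queued so the producer goroutine above can finish sending and | ||||
| 					// close queueChannel instead of blocking on a send. | ||||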
| 					for range queueChannel { | ||||
| 					} | ||||
| 					setVMFound(true) | ||||
| 					cancel() | ||||
| 					break | ||||
| 				} | ||||
| 				cancel() | ||||
| 			} | ||||
| 			wg.Done() | ||||
| 		}() | ||||
| 	} | ||||
| 	wg.Wait() | ||||
| 	if vmFound { | ||||
| 		return nil | ||||
| 	} | ||||
| 	if globalErr != nil { | ||||
| 		return *globalErr | ||||
| 	} | ||||
|  | ||||
| 	klog.V(4).Infof("DiscoverNode: VM for node %q not found", node.Name) | ||||
| 	return vclib.ErrNoVMFound | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) RegisterNode(node *v1.Node) error { | ||||
| 	nm.addNode(node) | ||||
| 	return nm.DiscoverNode(node) | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) UnRegisterNode(node *v1.Node) error { | ||||
| 	nm.removeNode(node) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) RediscoverNode(nodeName k8stypes.NodeName) error { | ||||
| 	node, err := nm.GetNode(nodeName) | ||||
|  | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nm.DiscoverNode(&node) | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) GetNode(nodeName k8stypes.NodeName) (v1.Node, error) { | ||||
| 	nm.registeredNodesLock.RLock() | ||||
| 	node := nm.registeredNodes[convertToString(nodeName)] | ||||
| 	nm.registeredNodesLock.RUnlock() | ||||
| 	if node != nil { | ||||
| 		klog.V(4).Infof("Node %s found in vSphere cloud provider cache", nodeName) | ||||
| 		return *node, nil | ||||
| 	} | ||||
|  | ||||
| 	if nm.nodeLister != nil { | ||||
| 		klog.V(4).Infof("Node %s missing in vSphere cloud provider cache, trying node informer", nodeName) | ||||
| 		node, err := nm.nodeLister.Get(convertToString(nodeName)) | ||||
| 		if err != nil { | ||||
| 			if !errors.IsNotFound(err) { | ||||
| 				return v1.Node{}, err | ||||
| 			} | ||||
| 			// Fall through with IsNotFound error and try to get the node from the API server | ||||
| 		} else { | ||||
| 			node := node.DeepCopy() | ||||
| 			nm.addNode(node) | ||||
| 			klog.V(4).Infof("Node %s found in vSphere cloud provider node informer", nodeName) | ||||
| 			return *node, nil | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if nm.nodeGetter != nil { | ||||
| 		klog.V(4).Infof("Node %s missing in vSphere cloud provider caches, trying the API server", nodeName) | ||||
| 		node, err := nm.nodeGetter.Nodes().Get(context.TODO(), convertToString(nodeName), metav1.GetOptions{}) | ||||
| 		if err != nil { | ||||
| 			if !errors.IsNotFound(err) { | ||||
| 				return v1.Node{}, err | ||||
| 			} | ||||
| 			// Fall through with IsNotFound error to keep the code consistent with the above | ||||
| 		} else { | ||||
| 			nm.addNode(node) | ||||
| 			klog.V(4).Infof("Node %s found in the API server", nodeName) | ||||
| 			return *node, nil | ||||
| 		} | ||||
| 	} | ||||
| 	klog.V(4).Infof("Node %s not found in vSphere cloud provider", nodeName) | ||||
| 	return v1.Node{}, vclib.ErrNoVMFound | ||||
| } | ||||
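|  | ||||
| // Illustrative sketch only: a lookup through GetNode falls back from the local | ||||
| // cache to the node informer and finally to the API server, so a typical | ||||
| // caller only needs to handle the not-found case, e.g.: | ||||
| // | ||||
| //	node, err := nm.GetNode(k8stypes.NodeName("node1")) | ||||
| //	if err == vclib.ErrNoVMFound { | ||||
| //		// the node is not known to the cache, the informer, or the API server | ||||
| //	} | ||||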
|  | ||||
| func (nm *NodeManager) getNodes() map[string]*v1.Node { | ||||
| 	nm.registeredNodesLock.RLock() | ||||
| 	defer nm.registeredNodesLock.RUnlock() | ||||
| 	registeredNodes := make(map[string]*v1.Node, len(nm.registeredNodes)) | ||||
| 	for nodeName, node := range nm.registeredNodes { | ||||
| 		registeredNodes[nodeName] = node | ||||
| 	} | ||||
| 	return registeredNodes | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) addNode(node *v1.Node) { | ||||
| 	nm.registeredNodesLock.Lock() | ||||
| 	nm.registeredNodes[node.ObjectMeta.Name] = node | ||||
| 	nm.registeredNodesLock.Unlock() | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) removeNode(node *v1.Node) { | ||||
| 	nm.registeredNodesLock.Lock() | ||||
| 	delete(nm.registeredNodes, node.ObjectMeta.Name) | ||||
| 	nm.registeredNodesLock.Unlock() | ||||
|  | ||||
| 	nm.nodeInfoLock.Lock() | ||||
| 	delete(nm.nodeInfoMap, node.ObjectMeta.Name) | ||||
| 	nm.nodeInfoLock.Unlock() | ||||
| } | ||||
|  | ||||
| // GetNodeInfo returns a NodeInfo containing the datacenter, VM and vCenter server IP address for the given node. | ||||
| // This method returns an error if it is unable to find the node in the VCs and DCs listed in vSphere.conf. | ||||
| // The returned NodeInfo may not reflect the current VM location. | ||||
| // | ||||
| // This method is a getter, but it can have the side effect of updating the cached NodeInfo object. | ||||
| func (nm *NodeManager) GetNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) { | ||||
| 	return nm.getRefreshedNodeInfo(nodeName) | ||||
| } | ||||
|  | ||||
| // GetNodeDetails returns NodeDetails for all the discovered nodes. | ||||
| // | ||||
| // This method is a getter, but it can have the side effect of updating the cached NodeInfo objects. | ||||
| func (nm *NodeManager) GetNodeDetails() ([]NodeDetails, error) { | ||||
| 	var nodeDetails []NodeDetails | ||||
|  | ||||
| 	for nodeName, nodeObj := range nm.getNodes() { | ||||
| 		nodeInfo, err := nm.GetNodeInfoWithNodeObject(nodeObj) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) | ||||
| 		nodeDetails = append(nodeDetails, NodeDetails{nodeName, nodeInfo.vm, nodeInfo.vmUUID, nodeInfo.zone}) | ||||
| 	} | ||||
| 	return nodeDetails, nil | ||||
| } | ||||
|  | ||||
| // GetNodeNames returns the list of nodes that are known to the vSphere cloud provider. | ||||
| // These are typically the nodes that make up the k8s cluster. | ||||
| func (nm *NodeManager) GetNodeNames() []k8stypes.NodeName { | ||||
| 	nodes := nm.getNodes() | ||||
| 	var nodeNameList []k8stypes.NodeName | ||||
| 	for _, node := range nodes { | ||||
| 		nodeNameList = append(nodeNameList, k8stypes.NodeName(node.Name)) | ||||
| 	} | ||||
| 	return nodeNameList | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) refreshNodes() (errList []error) { | ||||
| 	for nodeName := range nm.getNodes() { | ||||
| 		nodeInfo, err := nm.getRefreshedNodeInfo(convertToK8sType(nodeName)) | ||||
| 		if err != nil { | ||||
| 			errList = append(errList, err) | ||||
| 			continue | ||||
| 		} | ||||
| 		klog.V(4).Infof("Updated NodeInfo %v for node %q.", nodeInfo, nodeName) | ||||
| 	} | ||||
| 	return errList | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) getRefreshedNodeInfo(nodeName k8stypes.NodeName) (NodeInfo, error) { | ||||
| 	nodeInfo := nm.getNodeInfo(nodeName) | ||||
| 	var err error | ||||
| 	if nodeInfo == nil { | ||||
| 		// Rediscover node if no NodeInfo found. | ||||
| 		klog.V(4).Infof("No VM found for node %q. Initiating rediscovery.", convertToString(nodeName)) | ||||
| 		err = nm.RediscoverNode(nodeName) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Error %q occurred while rediscovering node %q", err, convertToString(nodeName)) | ||||
| 			return NodeInfo{}, err | ||||
| 		} | ||||
| 		nodeInfo = nm.getNodeInfo(nodeName) | ||||
| 	} else { | ||||
| 		// Renew the found NodeInfo to avoid stale vSphere connection. | ||||
| 		klog.V(4).Infof("Renewing NodeInfo %+v for node %q", nodeInfo, convertToString(nodeName)) | ||||
| 		nodeInfo, err = nm.renewNodeInfo(nodeInfo, true) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Error %q occurred while renewing NodeInfo for %q", err, convertToString(nodeName)) | ||||
| 			return NodeInfo{}, err | ||||
| 		} | ||||
| 		nm.addNodeInfo(convertToString(nodeName), nodeInfo) | ||||
| 	} | ||||
| 	return *nodeInfo, nil | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) addNodeInfo(nodeName string, nodeInfo *NodeInfo) { | ||||
| 	nm.nodeInfoLock.Lock() | ||||
| 	nm.nodeInfoMap[nodeName] = nodeInfo | ||||
| 	nm.nodeInfoLock.Unlock() | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) getNodeInfo(nodeName k8stypes.NodeName) *NodeInfo { | ||||
| 	nm.nodeInfoLock.RLock() | ||||
| 	nodeInfo := nm.nodeInfoMap[convertToString(nodeName)] | ||||
| 	nm.nodeInfoLock.RUnlock() | ||||
| 	return nodeInfo | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) GetVSphereInstance(nodeName k8stypes.NodeName) (VSphereInstance, error) { | ||||
| 	nodeInfo, err := nm.GetNodeInfo(nodeName) | ||||
| 	if err != nil { | ||||
| 		klog.V(4).Infof("node info for node %q not found", convertToString(nodeName)) | ||||
| 		return VSphereInstance{}, err | ||||
| 	} | ||||
| 	vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] | ||||
| 	if vsphereInstance == nil { | ||||
| 		return VSphereInstance{}, fmt.Errorf("vSphereInstance for vc server %q not found while looking for node %q", nodeInfo.vcServer, convertToString(nodeName)) | ||||
| 	} | ||||
| 	return *vsphereInstance, nil | ||||
| } | ||||
|  | ||||
| // renewNodeInfo renews vSphere connection, VirtualMachine and Datacenter for NodeInfo instance. | ||||
| func (nm *NodeManager) renewNodeInfo(nodeInfo *NodeInfo, reconnect bool) (*NodeInfo, error) { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	vsphereInstance := nm.vsphereInstanceMap[nodeInfo.vcServer] | ||||
| 	if vsphereInstance == nil { | ||||
| 		err := fmt.Errorf("vSphereInstance for vCenter %q not found while refreshing NodeInfo for VM %q", nodeInfo.vcServer, nodeInfo.vm) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if reconnect { | ||||
| 		err := nm.vcConnect(ctx, vsphereInstance) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	vm := nodeInfo.vm.RenewVM(vsphereInstance.conn.Client) | ||||
| 	return &NodeInfo{ | ||||
| 		vm:         &vm, | ||||
| 		dataCenter: vm.Datacenter, | ||||
| 		vcServer:   nodeInfo.vcServer, | ||||
| 		vmUUID:     nodeInfo.vmUUID, | ||||
| 		zone:       nodeInfo.zone, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| func (nodeInfo *NodeInfo) VM() *vclib.VirtualMachine { | ||||
| 	if nodeInfo == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	return nodeInfo.vm | ||||
| } | ||||
|  | ||||
| // vcConnect connects to vCenter with the existing credentials. | ||||
| // If the credentials are invalid: | ||||
| //  1. It fetches fresh credentials from the credential manager. | ||||
| //  2. It updates the stored credentials. | ||||
| //  3. It reconnects to vCenter with the fetched credentials. | ||||
| func (nm *NodeManager) vcConnect(ctx context.Context, vsphereInstance *VSphereInstance) error { | ||||
| 	err := vsphereInstance.conn.Connect(ctx) | ||||
| 	if err == nil { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	credentialManager := nm.CredentialManager() | ||||
| 	if !vclib.IsInvalidCredentialsError(err) || credentialManager == nil { | ||||
| 		klog.Errorf("Cannot connect to vCenter with err: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	klog.V(4).Infof("Invalid credentials. Cannot connect to server %q. Fetching credentials from secrets.", vsphereInstance.conn.Hostname) | ||||
|  | ||||
| 	// Get latest credentials from SecretCredentialManager | ||||
| 	credentials, err := credentialManager.GetCredential(vsphereInstance.conn.Hostname) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get credentials from Secret Credential Manager with err: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	vsphereInstance.conn.UpdateCredentials(credentials.User, credentials.Password) | ||||
| 	return vsphereInstance.conn.Connect(ctx) | ||||
| } | ||||
|  | ||||
| // GetNodeInfoWithNodeObject returns a NodeInfo containing the datacenter, VM and vCenter server IP address for the given node. | ||||
| // This method returns an error if it is unable to find the node in the VCs and DCs listed in vSphere.conf. | ||||
| // The returned NodeInfo may not reflect the current VM location. | ||||
| // | ||||
| // This method is a getter, but it can have the side effect of updating the cached NodeInfo object. | ||||
| func (nm *NodeManager) GetNodeInfoWithNodeObject(node *v1.Node) (NodeInfo, error) { | ||||
| 	return nm.getRefreshedNodeInfo(convertToK8sType(node.Name)) | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) CredentialManager() *SecretCredentialManager { | ||||
| 	nm.credentialManagerLock.Lock() | ||||
| 	defer nm.credentialManagerLock.Unlock() | ||||
| 	return nm.credentialManager | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) UpdateCredentialManager(credentialManager *SecretCredentialManager) { | ||||
| 	nm.credentialManagerLock.Lock() | ||||
| 	defer nm.credentialManagerLock.Unlock() | ||||
| 	nm.credentialManager = credentialManager | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) GetHostsInZone(ctx context.Context, zoneFailureDomain string) ([]*object.HostSystem, error) { | ||||
| 	klog.V(9).Infof("GetHostsInZone called with registeredNodes: %v", nm.registeredNodes) | ||||
| 	nodeDetails, err := nm.GetNodeDetails() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(4).Infof("Node Details: %v", nodeDetails) | ||||
| 	// Build a map of Host moRef to HostSystem | ||||
| 	hostMap := make(map[string]*object.HostSystem) | ||||
| 	for _, n := range nodeDetails { | ||||
| 		// Match the provided zone failure domain with the node. | ||||
| 		klog.V(9).Infof("Matching provided zone %s with node %s zone %s", zoneFailureDomain, n.NodeName, n.Zone.FailureDomain) | ||||
| 		if zoneFailureDomain == n.Zone.FailureDomain { | ||||
| 			host, err := n.vm.HostSystem(ctx) | ||||
| 			if err != nil { | ||||
| 				klog.Errorf("Failed to get host system for VM %s. err: %+v", n.vm, err) | ||||
| 				continue | ||||
| 			} | ||||
| 			hostMap[host.Reference().Value] = host | ||||
| 		} | ||||
| 	} | ||||
| 	// Build the unique list of hosts. | ||||
| 	hosts := make([]*object.HostSystem, 0) | ||||
| 	for _, value := range hostMap { | ||||
| 		hosts = append(hosts, value) | ||||
| 	} | ||||
| 	klog.V(4).Infof("GetHostsInZone %v returning: %v", zoneFailureDomain, hosts) | ||||
| 	return hosts, nil | ||||
| } | ||||
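|  | ||||
| // Illustrative sketch only (the zone value is a placeholder): callers that | ||||
| // need the ESXi hosts backing a failure domain can use GetHostsInZone like | ||||
| // this: | ||||
| // | ||||
| //	hosts, err := nm.GetHostsInZone(ctx, "zone-a") | ||||
| //	if err != nil { | ||||
| //		return err | ||||
| //	} | ||||
| //	for _, host := range hosts { | ||||
| //		klog.V(4).Infof("host %s is in zone zone-a", host.Name()) | ||||
| //	} | ||||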
|  | ||||
| func (nm *NodeManager) SetNodeLister(nodeLister corelisters.NodeLister) { | ||||
| 	nm.nodeLister = nodeLister | ||||
| } | ||||
|  | ||||
| func (nm *NodeManager) SetNodeGetter(nodeGetter coreclients.NodesGetter) { | ||||
| 	nm.nodeGetter = nodeGetter | ||||
| } | ||||
| @@ -1,149 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2023 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"testing" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/runtime" | ||||
| 	"k8s.io/client-go/informers" | ||||
| 	"k8s.io/client-go/kubernetes/fake" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| // Annotation used to distinguish nodes in node cache / informer / API server | ||||
| const nodeAnnotation = "test" | ||||
|  | ||||
| func getNode(annotation string) *v1.Node { | ||||
| 	return &v1.Node{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name: "node1", | ||||
| 			Annotations: map[string]string{ | ||||
| 				nodeAnnotation: annotation, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestGetNode(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name           string | ||||
| 		cachedNodes    []*v1.Node | ||||
| 		informerNodes  []*v1.Node // "nil" means that the NodeManager has no nodeLister | ||||
| 		apiServerNodes []*v1.Node // "nil" means that the NodeManager has no nodeGetter | ||||
|  | ||||
| 		expectedNodeAnnotation string | ||||
| 		expectNotFound         bool | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:           "No cached node anywhere", | ||||
| 			cachedNodes:    []*v1.Node{}, | ||||
| 			informerNodes:  []*v1.Node{}, | ||||
| 			apiServerNodes: []*v1.Node{}, | ||||
| 			expectNotFound: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:           "No lister & getter", | ||||
| 			cachedNodes:    []*v1.Node{}, | ||||
| 			informerNodes:  nil, | ||||
| 			apiServerNodes: nil, | ||||
| 			expectNotFound: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                   "cache is used first", | ||||
| 			cachedNodes:            []*v1.Node{getNode("cache")}, | ||||
| 			informerNodes:          []*v1.Node{getNode("informer")}, | ||||
| 			apiServerNodes:         []*v1.Node{getNode("apiserver")}, | ||||
| 			expectedNodeAnnotation: "cache", | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                   "informer is used second", | ||||
| 			cachedNodes:            []*v1.Node{}, | ||||
| 			informerNodes:          []*v1.Node{getNode("informer")}, | ||||
| 			apiServerNodes:         []*v1.Node{getNode("apiserver")}, | ||||
| 			expectedNodeAnnotation: "informer", | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:                   "API server is used last", | ||||
| 			cachedNodes:            []*v1.Node{}, | ||||
| 			informerNodes:          []*v1.Node{}, | ||||
| 			apiServerNodes:         []*v1.Node{getNode("apiserver")}, | ||||
| 			expectedNodeAnnotation: "apiserver", | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, test := range tests { | ||||
| 		t.Run(test.name, func(t *testing.T) { | ||||
|  | ||||
| 			// local NodeManager cache | ||||
| 			cache := make(map[string]*v1.Node) | ||||
| 			for _, node := range test.cachedNodes { | ||||
| 				cache[node.Name] = node | ||||
| 			} | ||||
|  | ||||
| 			// Client with apiServerNodes | ||||
| 			objs := []runtime.Object{} | ||||
| 			for _, node := range test.apiServerNodes { | ||||
| 				objs = append(objs, node) | ||||
| 			} | ||||
| 			client := fake.NewSimpleClientset(objs...) | ||||
| 			nodeGetter := client.CoreV1() | ||||
|  | ||||
| 			// Informer + nodeLister. Even though the client already has apiServerNodes, they won't appear in the | ||||
| 			// nodeLister, because the informer is never started. | ||||
| 			factory := informers.NewSharedInformerFactory(client, 0 /* no resync */) | ||||
| 			nodeInformer := factory.Core().V1().Nodes() | ||||
| 			for _, node := range test.informerNodes { | ||||
| 				nodeInformer.Informer().GetStore().Add(node) | ||||
| 			} | ||||
| 			nodeLister := nodeInformer.Lister() | ||||
|  | ||||
| 			nodeManager := NodeManager{ | ||||
| 				registeredNodes: cache, | ||||
| 			} | ||||
| 			if test.informerNodes != nil { | ||||
| 				nodeManager.SetNodeLister(nodeLister) | ||||
| 			} | ||||
| 			if test.apiServerNodes != nil { | ||||
| 				nodeManager.SetNodeGetter(nodeGetter) | ||||
| 			} | ||||
|  | ||||
| 			node, err := nodeManager.GetNode("node1") | ||||
| 			if test.expectNotFound && err != vclib.ErrNoVMFound { | ||||
| 				t.Errorf("Expected NotFound error, got: %v", err) | ||||
| 			} | ||||
| 			if !test.expectNotFound && err != nil { | ||||
| 				t.Errorf("Unexpected error: %s", err) | ||||
| 			} | ||||
|  | ||||
| 			if test.expectedNodeAnnotation != "" { | ||||
| 				if node.Annotations == nil { | ||||
| 					t.Errorf("Expected node with annotation %q, got nil", test.expectedNodeAnnotation) | ||||
| 				} else { | ||||
| 					if ann := node.Annotations[nodeAnnotation]; ann != test.expectedNodeAnnotation { | ||||
| 						t.Errorf("Expected node with annotation %q, got %q", test.expectedNodeAnnotation, ann) | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
| @@ -1,210 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2021 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/property" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| type sharedDatastore struct { | ||||
| 	nodeManager         *NodeManager | ||||
| 	candidateDatastores []*vclib.DatastoreInfo | ||||
| } | ||||
|  | ||||
| type hostInfo struct { | ||||
| 	hostUUID   string | ||||
| 	hostMOID   string | ||||
| 	datacenter string | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	summary       = "summary" | ||||
| 	runtimeHost   = "summary.runtime.host" | ||||
| 	hostsProperty = "host" | ||||
| 	nameProperty  = "name" | ||||
| ) | ||||
|  | ||||
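| // getSharedDatastore returns the first candidate datastore that is mounted on | ||||
| // every ESXi host running one of the registered node VMs. It groups the nodes | ||||
| // by vCenter and datacenter, collects the host of each node VM (falling back | ||||
| // to per-VM lookups if the batched property fetch fails), and then compares | ||||
| // each candidate datastore's attached hosts against that host set. | ||||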
| func (shared *sharedDatastore) getSharedDatastore(ctcx context.Context) (*vclib.DatastoreInfo, error) { | ||||
| 	nodes := shared.nodeManager.getNodes() | ||||
|  | ||||
| 	// Segregate nodes according to VC-DC | ||||
| 	dcNodes := make(map[string][]NodeInfo) | ||||
| 	nodeHosts := make(map[string]hostInfo) | ||||
|  | ||||
| 	for nodeName, node := range nodes { | ||||
| 		nodeInfo, err := shared.nodeManager.GetNodeInfoWithNodeObject(node) | ||||
| 		if err != nil { | ||||
| 			return nil, fmt.Errorf("unable to find node %s: %v", nodeName, err) | ||||
| 		} | ||||
| 		vcDC := nodeInfo.vcServer + nodeInfo.dataCenter.String() | ||||
| 		dcNodes[vcDC] = append(dcNodes[vcDC], nodeInfo) | ||||
| 	} | ||||
|  | ||||
| 	for vcDC, nodes := range dcNodes { | ||||
| 		var hostInfos []hostInfo | ||||
| 		var err error | ||||
| 		hostInfos, err = shared.getNodeHosts(ctcx, nodes, vcDC) | ||||
| 		if err != nil { | ||||
| 			if vclib.IsManagedObjectNotFoundError(err) { | ||||
| 				klog.Warningf("SharedHost.getSharedDatastore: batch fetching of hosts failed - switching to fetching them individually.") | ||||
| 				hostInfos, err = shared.getEachNodeHost(ctcx, nodes, vcDC) | ||||
| 				if err != nil { | ||||
| 					klog.Errorf("SharedHost.getSharedDatastore: error fetching node hosts individually: %v", err) | ||||
| 					return nil, err | ||||
| 				} | ||||
| 			} else { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 		} | ||||
| 		for _, host := range hostInfos { | ||||
| 			hostDCName := fmt.Sprintf("%s/%s", host.datacenter, host.hostMOID) | ||||
| 			nodeHosts[hostDCName] = host | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(nodeHosts) < 1 { | ||||
| 		msg := "SharedHost.getSharedDatastore: unable to find hosts associated with nodes" | ||||
| 		klog.Error(msg) | ||||
| 		return nil, fmt.Errorf("%s", msg) | ||||
| 	} | ||||
|  | ||||
| 	for _, datastoreInfo := range shared.candidateDatastores { | ||||
| 		dataStoreHosts, err := shared.getAttachedHosts(ctcx, datastoreInfo.Datastore) | ||||
| 		if err != nil { | ||||
| 			msg := fmt.Sprintf("error finding attached hosts to datastore %s: %v", datastoreInfo.Name(), err) | ||||
| 			klog.Error(msg) | ||||
| 			return nil, fmt.Errorf("%s", msg) | ||||
| 		} | ||||
| 		if shared.isIncluded(dataStoreHosts, nodeHosts) { | ||||
| 			return datastoreInfo, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, fmt.Errorf("SharedHost.getSharedDatastore: unable to find any shared datastores") | ||||
| } | ||||
|  | ||||
| // check if all of the nodeHosts are included in the dataStoreHosts | ||||
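| // For example (illustrative values): if the datastore is attached to hosts | ||||
| // A, B and C and the node VMs run on hosts A and B, isIncluded returns true; | ||||
| // if some node VM ran on a host D that is not attached to the datastore, it | ||||
| // would return false. | ||||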
| func (shared *sharedDatastore) isIncluded(dataStoreHosts []hostInfo, nodeHosts map[string]hostInfo) bool { | ||||
| 	result := true | ||||
| 	for _, host := range nodeHosts { | ||||
| 		hostFound := false | ||||
| 		for _, targetHost := range dataStoreHosts { | ||||
| 			if host.hostUUID == targetHost.hostUUID && host.hostMOID == targetHost.hostMOID { | ||||
| 				hostFound = true | ||||
| 			} | ||||
| 		} | ||||
| 		if !hostFound { | ||||
| 			result = false | ||||
| 		} | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| func (shared *sharedDatastore) getEachNodeHost(ctx context.Context, nodes []NodeInfo, dcVC string) ([]hostInfo, error) { | ||||
| 	var hosts []hostInfo | ||||
| 	for _, node := range nodes { | ||||
| 		host, err := node.vm.GetHost(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("SharedHost.getEachNodeHost: unable to find host for vm %s: %v", node.vm.InventoryPath, err) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		hosts = append(hosts, hostInfo{ | ||||
| 			hostUUID:   host.Summary.Hardware.Uuid, | ||||
| 			hostMOID:   host.Summary.Host.String(), | ||||
| 			datacenter: node.dataCenter.String(), | ||||
| 		}) | ||||
| 	} | ||||
| 	return hosts, nil | ||||
| } | ||||
|  | ||||
| func (shared *sharedDatastore) getNodeHosts(ctx context.Context, nodes []NodeInfo, dcVC string) ([]hostInfo, error) { | ||||
| 	var vmRefs []types.ManagedObjectReference | ||||
| 	if len(nodes) < 1 { | ||||
| 		return nil, fmt.Errorf("no nodes found for dc-vc: %s", dcVC) | ||||
| 	} | ||||
| 	var nodeInfo NodeInfo | ||||
| 	for _, n := range nodes { | ||||
| 		nodeInfo = n | ||||
| 		vmRefs = append(vmRefs, n.vm.Reference()) | ||||
| 	} | ||||
| 	pc := property.DefaultCollector(nodeInfo.dataCenter.Client()) | ||||
| 	var vmoList []mo.VirtualMachine | ||||
| 	err := pc.Retrieve(ctx, vmRefs, []string{nameProperty, runtimeHost}, &vmoList) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("SharedHost.getNodeHosts: unable to fetch vms from datacenter %s: %v", nodeInfo.dataCenter.String(), err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var hostMoList []mo.HostSystem | ||||
| 	var hostRefs []types.ManagedObjectReference | ||||
| 	for _, vmo := range vmoList { | ||||
| 		if vmo.Summary.Runtime.Host == nil { | ||||
| 			msg := fmt.Sprintf("SharedHost.getNodeHosts: no host associated with vm %s", vmo.Name) | ||||
| 			klog.Error(msg) | ||||
| 			return nil, fmt.Errorf("%s", msg) | ||||
| 		} | ||||
| 		hostRefs = append(hostRefs, vmo.Summary.Runtime.Host.Reference()) | ||||
| 	} | ||||
| 	pc = property.DefaultCollector(nodeInfo.dataCenter.Client()) | ||||
| 	err = pc.Retrieve(ctx, hostRefs, []string{summary}, &hostMoList) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("SharedHost.getNodeHosts: unable to fetch hosts from datacenter %s: %v", nodeInfo.dataCenter.String(), err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var hosts []hostInfo | ||||
| 	for _, host := range hostMoList { | ||||
| 		hosts = append(hosts, hostInfo{hostMOID: host.Summary.Host.String(), hostUUID: host.Summary.Hardware.Uuid, datacenter: nodeInfo.dataCenter.String()}) | ||||
| 	} | ||||
| 	return hosts, nil | ||||
| } | ||||
|  | ||||
| func (shared *sharedDatastore) getAttachedHosts(ctx context.Context, datastore *vclib.Datastore) ([]hostInfo, error) { | ||||
| 	var ds mo.Datastore | ||||
|  | ||||
| 	pc := property.DefaultCollector(datastore.Client()) | ||||
| 	err := pc.RetrieveOne(ctx, datastore.Reference(), []string{hostsProperty}, &ds) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	mounts := make(map[types.ManagedObjectReference]types.DatastoreHostMount) | ||||
| 	var refs []types.ManagedObjectReference | ||||
| 	for _, host := range ds.Host { | ||||
| 		refs = append(refs, host.Key) | ||||
| 		mounts[host.Key] = host | ||||
| 	} | ||||
|  | ||||
| 	var hs []mo.HostSystem | ||||
| 	err = pc.Retrieve(ctx, refs, []string{summary}, &hs) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var hosts []hostInfo | ||||
| 	for _, h := range hs { | ||||
| 		hosts = append(hosts, hostInfo{hostUUID: h.Summary.Hardware.Uuid, hostMOID: h.Summary.Host.String()}) | ||||
| 	} | ||||
| 	return hosts, nil | ||||
| } | ||||
| @@ -1,27 +0,0 @@ | ||||
| /* | ||||
| Copyright 2019 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package testing | ||||
|  | ||||
| import ( | ||||
| 	// test dependencies for k8s.io/legacy-cloud-providers/vsphere | ||||
| 	// import this package to vendor test dependencies since go modules does not | ||||
| 	// vendor transitive test dependencies | ||||
| 	_ "github.com/vmware/govmomi/lookup/simulator" | ||||
| 	_ "github.com/vmware/govmomi/simulator" | ||||
| 	_ "github.com/vmware/govmomi/sts/simulator" | ||||
| 	_ "github.com/vmware/govmomi/vapi/simulator" | ||||
| ) | ||||
| @@ -1,237 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/tls" | ||||
| 	"encoding/pem" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	neturl "net/url" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/session" | ||||
| 	"github.com/vmware/govmomi/sts" | ||||
| 	"github.com/vmware/govmomi/vim25" | ||||
| 	"github.com/vmware/govmomi/vim25/soap" | ||||
| 	"k8s.io/client-go/pkg/version" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // VSphereConnection contains information for connecting to vCenter | ||||
| type VSphereConnection struct { | ||||
| 	Client            *vim25.Client | ||||
| 	Username          string | ||||
| 	Password          string `datapolicy:"password"` | ||||
| 	Hostname          string | ||||
| 	Port              string | ||||
| 	CACert            string | ||||
| 	Thumbprint        string | ||||
| 	Insecure          bool | ||||
| 	RoundTripperCount uint | ||||
| 	credentialsLock   sync.Mutex | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	clientLock sync.Mutex | ||||
| ) | ||||
|  | ||||
| // Connect makes a connection to vCenter and sets VSphereConnection.Client. | ||||
| // If connection.Client is already set, it reuses the existing user session. | ||||
| // If the user session is no longer valid, connection.Client is replaced with a new client. | ||||
| func (connection *VSphereConnection) Connect(ctx context.Context) error { | ||||
| 	var err error | ||||
| 	clientLock.Lock() | ||||
| 	defer clientLock.Unlock() | ||||
|  | ||||
| 	if connection.Client == nil { | ||||
| 		connection.Client, err = connection.NewClient(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed to create govmomi client. err: %+v", err) | ||||
| 			return err | ||||
| 		} | ||||
| 		setVCenterInfoMetric(connection) | ||||
| 		return nil | ||||
| 	} | ||||
| 	m := session.NewManager(connection.Client) | ||||
| 	userSession, err := m.UserSession(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error while obtaining user session. err: %+v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	if userSession != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	klog.Warningf("Creating new client session since the existing session is not valid or not authenticated") | ||||
|  | ||||
| 	connection.Client, err = connection.NewClient(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create govmomi client. err: %+v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
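|  | ||||
| // A minimal, illustrative use of Connect; all field values are placeholders: | ||||
| // | ||||
| //	conn := &VSphereConnection{ | ||||
| //		Hostname: "vcenter.example.com", | ||||
| //		Port:     "443", | ||||
| //		Username: "user", | ||||
| //		Password: "secret", | ||||
| //		Insecure: true, | ||||
| //	} | ||||
| //	ctx, cancel := context.WithCancel(context.Background()) | ||||
| //	defer cancel() | ||||
| //	if err := conn.Connect(ctx); err != nil { | ||||
| //		return err | ||||
| //	} | ||||
| //	defer conn.Logout(ctx) | ||||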
|  | ||||
| // Signer returns an sts.Signer for use with SAML token auth if the connection is configured for it. | ||||
| // It returns nil if username/password auth is configured for the connection. | ||||
| func (connection *VSphereConnection) Signer(ctx context.Context, client *vim25.Client) (*sts.Signer, error) { | ||||
| 	// TODO: Add separate fields for certificate and private-key. | ||||
| 	// For now we can leave the config structs and validation as-is and | ||||
| 	// decide to use LoginByToken if the username value is PEM encoded. | ||||
| 	b, _ := pem.Decode([]byte(connection.Username)) | ||||
| 	if b == nil { | ||||
| 		return nil, nil | ||||
| 	} | ||||
|  | ||||
| 	cert, err := tls.X509KeyPair([]byte(connection.Username), []byte(connection.Password)) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to load X509 key pair. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	tokens, err := sts.NewClient(ctx, client) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create STS client. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	req := sts.TokenRequest{ | ||||
| 		Certificate: &cert, | ||||
| 		Delegatable: true, | ||||
| 	} | ||||
|  | ||||
| 	signer, err := tokens.Issue(ctx, req) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to issue SAML token. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return signer, nil | ||||
| } | ||||
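|  | ||||
| // Illustrative only: to use SAML token authentication instead of a username | ||||
| // and password, the connection can carry a PEM-encoded certificate and | ||||
| // private key in the Username and Password fields (names below are | ||||
| // placeholders): | ||||
| // | ||||
| //	conn.Username = string(certPEM) // PEM-encoded certificate | ||||
| //	conn.Password = string(keyPEM)  // PEM-encoded private key | ||||
| // | ||||
| // Signer detects the PEM block and issues a SAML token through the STS | ||||
| // client; otherwise it returns nil and password-based login is used. | ||||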
|  | ||||
| // login calls SessionManager.LoginByToken if certificate and private key are configured, | ||||
| // otherwise calls SessionManager.Login with user and password. | ||||
| func (connection *VSphereConnection) login(ctx context.Context, client *vim25.Client) error { | ||||
| 	m := session.NewManager(client) | ||||
| 	connection.credentialsLock.Lock() | ||||
| 	defer connection.credentialsLock.Unlock() | ||||
|  | ||||
| 	signer, err := connection.Signer(ctx, client) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if signer == nil { | ||||
| 		klog.V(3).Infof("SessionManager.Login with username %q", connection.Username) | ||||
| 		return m.Login(ctx, neturl.UserPassword(connection.Username, connection.Password)) | ||||
| 	} | ||||
|  | ||||
| 	klog.V(3).Infof("SessionManager.LoginByToken with certificate %q", connection.Username) | ||||
|  | ||||
| 	header := soap.Header{Security: signer} | ||||
|  | ||||
| 	return m.LoginByToken(client.WithHeader(ctx, header)) | ||||
| } | ||||
|  | ||||
| // Logout calls SessionManager.Logout for the given connection. | ||||
| func (connection *VSphereConnection) Logout(ctx context.Context) { | ||||
| 	clientLock.Lock() | ||||
| 	c := connection.Client | ||||
| 	clientLock.Unlock() | ||||
| 	if c == nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	m := session.NewManager(c) | ||||
|  | ||||
| 	hasActiveSession, err := m.SessionIsActive(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Logout failed: %s", err) | ||||
| 		return | ||||
| 	} | ||||
| 	if !hasActiveSession { | ||||
| 		klog.Errorf("No active session, cannot logout") | ||||
| 		return | ||||
| 	} | ||||
| 	if err := m.Logout(ctx); err != nil { | ||||
| 		klog.Errorf("Logout failed: %s", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // NewClient creates a new govmomi client for the VSphereConnection obj | ||||
| func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Client, error) { | ||||
| 	url, err := soap.ParseURL(net.JoinHostPort(connection.Hostname, connection.Port)) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to parse URL: %s. err: %+v", url, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	sc := soap.NewClient(url, connection.Insecure) | ||||
|  | ||||
| 	if ca := connection.CACert; ca != "" { | ||||
| 		if err := sc.SetRootCAs(ca); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	tpHost := connection.Hostname + ":" + connection.Port | ||||
| 	sc.SetThumbprint(tpHost, connection.Thumbprint) | ||||
|  | ||||
| 	client, err := vim25.NewClient(ctx, sc) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create new client. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	k8sVersion := version.Get().GitVersion | ||||
| 	client.UserAgent = fmt.Sprintf("kubernetes-cloudprovider/%s", k8sVersion) | ||||
|  | ||||
| 	err = connection.login(ctx, client) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klogV := klog.V(3) | ||||
| 	if klogV.Enabled() { | ||||
| 		s, err := session.NewManager(client).UserSession(ctx) | ||||
| 		if err == nil { | ||||
| 			klogV.Infof("New session ID for '%s' = %s", s.UserName, s.Key) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if connection.RoundTripperCount == 0 { | ||||
| 		connection.RoundTripperCount = RoundTripperDefaultCount | ||||
| 	} | ||||
| 	client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(connection.RoundTripperCount))) | ||||
| 	vcNotSupported, err := isvCenterNotSupported(client.ServiceContent.About.Version, client.ServiceContent.About.ApiVersion) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("failed to check if vCenter version:%v and api version: %s is supported or not. Error: %v", client.ServiceContent.About.Version, client.ServiceContent.About.ApiVersion, err) | ||||
| 	} | ||||
| 	if vcNotSupported { | ||||
| 		klog.Warningf("vCenter version (version: %q, api version: %q) is not supported for CSI Migration. Please consider upgrading vCenter and ESXi servers to 7.0u2 or higher for migrating vSphere volumes to CSI.", client.ServiceContent.About.Version, client.ServiceContent.About.ApiVersion) | ||||
| 	} | ||||
| 	return client, nil | ||||
| } | ||||
|  | ||||
| // UpdateCredentials updates username and password. | ||||
| // Note: The updated username and password are used only when there is no active session. | ||||
| func (connection *VSphereConnection) UpdateCredentials(username string, password string) { | ||||
| 	connection.credentialsLock.Lock() | ||||
| 	defer connection.credentialsLock.Unlock() | ||||
| 	connection.Username = username | ||||
| 	connection.Password = password | ||||
| } | ||||
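|  | ||||
| // Illustrative sketch: rotating credentials and reconnecting (the new values | ||||
| // are placeholders): | ||||
| // | ||||
| //	conn.UpdateCredentials(newUser, newPassword) | ||||
| //	if err := conn.Connect(ctx); err != nil { | ||||
| //		klog.Errorf("reconnect with updated credentials failed: %v", err) | ||||
| //	} | ||||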
| @@ -1,219 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib_test | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/sha1" | ||||
| 	"crypto/tls" | ||||
| 	"crypto/x509" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"net/http" | ||||
| 	"net/http/httptest" | ||||
| 	"net/url" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"testing" | ||||
|  | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| func createTestServer( | ||||
| 	t *testing.T, | ||||
| 	caCertPath string, | ||||
| 	serverCertPath string, | ||||
| 	serverKeyPath string, | ||||
| 	handler http.HandlerFunc, | ||||
| ) (*httptest.Server, string) { | ||||
| 	caCertPEM, err := ioutil.ReadFile(caCertPath) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Could not read ca cert from file") | ||||
| 	} | ||||
|  | ||||
| 	serverCert, err := tls.LoadX509KeyPair(serverCertPath, serverKeyPath) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Could not load server cert and server key from files: %#v", err) | ||||
| 	} | ||||
|  | ||||
| 	certPool := x509.NewCertPool() | ||||
| 	if ok := certPool.AppendCertsFromPEM(caCertPEM); !ok { | ||||
| 		t.Fatalf("Cannot add CA to CAPool") | ||||
| 	} | ||||
|  | ||||
| 	server := httptest.NewUnstartedServer(http.HandlerFunc(handler)) | ||||
| 	server.TLS = &tls.Config{ | ||||
| 		Certificates: []tls.Certificate{ | ||||
| 			serverCert, | ||||
| 		}, | ||||
| 		RootCAs: certPool, | ||||
| 	} | ||||
|  | ||||
| 	// calculate the leaf certificate's fingerprint | ||||
| 	if len(server.TLS.Certificates) < 1 || len(server.TLS.Certificates[0].Certificate) < 1 { | ||||
| 		t.Fatal("Expected server.TLS.Certificates not to be empty") | ||||
| 	} | ||||
| 	x509LeafCert := server.TLS.Certificates[0].Certificate[0] | ||||
| 	var tpString string | ||||
| 	for i, b := range sha1.Sum(x509LeafCert) { | ||||
| 		if i > 0 { | ||||
| 			tpString += ":" | ||||
| 		} | ||||
| 		tpString += fmt.Sprintf("%02X", b) | ||||
| 	} | ||||
|  | ||||
| 	return server, tpString | ||||
| } | ||||
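|  | ||||
| // The thumbprint returned above is the SHA-1 fingerprint of the leaf | ||||
| // certificate in the colon-separated upper-case hex form vCenter expects, | ||||
| // e.g. (made-up value) "B0:56:7D:99:AB:CD:EF:1A". | ||||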
|  | ||||
| func TestWithValidCaCert(t *testing.T) { | ||||
| 	handler, verifyConnectionWasMade := getRequestVerifier(t) | ||||
|  | ||||
| 	server, _ := createTestServer(t, "./testdata/ca.pem", "./testdata/server.pem", "./testdata/server.key", handler) | ||||
| 	server.StartTLS() | ||||
| 	u := mustParseURL(t, server.URL) | ||||
|  | ||||
| 	connection := &vclib.VSphereConnection{ | ||||
| 		Hostname: u.Hostname(), | ||||
| 		Port:     u.Port(), | ||||
| 		CACert:   "./testdata/ca.pem", | ||||
| 	} | ||||
|  | ||||
| 	// Ignoring error here, because we only care about the TLS connection | ||||
| 	_, _ = connection.NewClient(context.Background()) | ||||
|  | ||||
| 	verifyConnectionWasMade() | ||||
| } | ||||
|  | ||||
| func TestWithVerificationWithWrongThumbprint(t *testing.T) { | ||||
| 	handler, _ := getRequestVerifier(t) | ||||
|  | ||||
| 	server, _ := createTestServer(t, "./testdata/ca.pem", "./testdata/server.pem", "./testdata/server.key", handler) | ||||
| 	server.StartTLS() | ||||
| 	u := mustParseURL(t, server.URL) | ||||
|  | ||||
| 	connection := &vclib.VSphereConnection{ | ||||
| 		Hostname:   u.Hostname(), | ||||
| 		Port:       u.Port(), | ||||
| 		Thumbprint: "obviously wrong", | ||||
| 	} | ||||
|  | ||||
| 	_, err := connection.NewClient(context.Background()) | ||||
|  | ||||
| 	if msg := err.Error(); !strings.Contains(msg, "thumbprint does not match") { | ||||
| 		t.Fatalf("Expected wrong thumbprint error, got '%s'", msg) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestWithVerificationWithoutCaCertOrThumbprint(t *testing.T) { | ||||
| 	handler, _ := getRequestVerifier(t) | ||||
|  | ||||
| 	server, _ := createTestServer(t, "./testdata/ca.pem", "./testdata/server.pem", "./testdata/server.key", handler) | ||||
| 	server.StartTLS() | ||||
| 	u := mustParseURL(t, server.URL) | ||||
|  | ||||
| 	connection := &vclib.VSphereConnection{ | ||||
| 		Hostname: u.Hostname(), | ||||
| 		Port:     u.Port(), | ||||
| 	} | ||||
|  | ||||
| 	_, err := connection.NewClient(context.Background()) | ||||
|  | ||||
| 	verifyWrappedX509UnkownAuthorityErr(t, err) | ||||
| } | ||||
|  | ||||
| func TestWithValidThumbprint(t *testing.T) { | ||||
| 	handler, verifyConnectionWasMade := getRequestVerifier(t) | ||||
|  | ||||
| 	server, thumbprint := | ||||
| 		createTestServer(t, "./testdata/ca.pem", "./testdata/server.pem", "./testdata/server.key", handler) | ||||
| 	server.StartTLS() | ||||
| 	u := mustParseURL(t, server.URL) | ||||
|  | ||||
| 	connection := &vclib.VSphereConnection{ | ||||
| 		Hostname:   u.Hostname(), | ||||
| 		Port:       u.Port(), | ||||
| 		Thumbprint: thumbprint, | ||||
| 	} | ||||
|  | ||||
| 	// Ignoring error here, because we only care about the TLS connection | ||||
| 	_, _ = connection.NewClient(context.Background()) | ||||
|  | ||||
| 	verifyConnectionWasMade() | ||||
| } | ||||
|  | ||||
| func TestWithInvalidCaCertPath(t *testing.T) { | ||||
| 	connection := &vclib.VSphereConnection{ | ||||
| 		Hostname: "should-not-matter", | ||||
| 		Port:     "27015", // doesn't matter, but has to be a valid port | ||||
| 		CACert:   "invalid-path", | ||||
| 	} | ||||
|  | ||||
| 	_, err := connection.NewClient(context.Background()) | ||||
| 	if _, ok := err.(*os.PathError); !ok { | ||||
| 		t.Fatalf("Expected an os.PathError, got: '%s' (%#v)", err.Error(), err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestInvalidCaCert(t *testing.T) { | ||||
| 	connection := &vclib.VSphereConnection{ | ||||
| 		Hostname: "should-not-matter", | ||||
| 		Port:     "27015", // doesn't matter, but has to be a valid port | ||||
| 		CACert:   "./testdata/invalid.pem", | ||||
| 	} | ||||
|  | ||||
| 	_, err := connection.NewClient(context.Background()) | ||||
|  | ||||
| 	if msg := err.Error(); !strings.Contains(msg, "invalid certificate") { | ||||
| 		t.Fatalf("Expected invalid certificate error, got '%s'", msg) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func verifyWrappedX509UnkownAuthorityErr(t *testing.T, err error) { | ||||
| 	urlErr, ok := err.(*url.Error) | ||||
| 	if !ok { | ||||
| 		t.Fatalf("Expected to receive an url.Error, got '%s' (%#v)", err.Error(), err) | ||||
| 	} | ||||
| 	var x509err x509.UnknownAuthorityError | ||||
| 	if !errors.As(urlErr.Err, &x509err) { | ||||
| 		t.Fatalf("Expected to receive a wrapped x509.UnknownAuthorityError, got: '%s' (%#v)", urlErr.Err.Error(), urlErr.Err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func getRequestVerifier(t *testing.T) (http.HandlerFunc, func()) { | ||||
| 	gotRequest := false | ||||
|  | ||||
| 	handler := func(w http.ResponseWriter, r *http.Request) { | ||||
| 		gotRequest = true | ||||
| 	} | ||||
|  | ||||
| 	checker := func() { | ||||
| 		if !gotRequest { | ||||
| 			t.Fatalf("Never saw a request, maybe TLS connection could not be established?") | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return handler, checker | ||||
| } | ||||
|  | ||||
| func mustParseURL(t *testing.T, i string) *url.URL { | ||||
| 	u, err := url.Parse(i) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Cannot parse URL: %v", err) | ||||
| 	} | ||||
| 	return u | ||||
| } | ||||
| @@ -1,65 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| // Volume Constants | ||||
| const ( | ||||
| 	ThinDiskType             = "thin" | ||||
| 	PreallocatedDiskType     = "preallocated" | ||||
| 	EagerZeroedThickDiskType = "eagerZeroedThick" | ||||
| 	ZeroedThickDiskType      = "zeroedThick" | ||||
| ) | ||||
|  | ||||
| // Controller Constants | ||||
| const ( | ||||
| 	SCSIControllerLimit       = 4 | ||||
| 	SCSIControllerDeviceLimit = 15 | ||||
| 	SCSIDeviceSlots           = 16 | ||||
| 	SCSIReservedSlot          = 7 | ||||
|  | ||||
| 	SCSIControllerType        = "scsi" | ||||
| 	LSILogicControllerType    = "lsiLogic" | ||||
| 	BusLogicControllerType    = "busLogic" | ||||
| 	LSILogicSASControllerType = "lsiLogic-sas" | ||||
| 	PVSCSIControllerType      = "pvscsi" | ||||
| ) | ||||
|  | ||||
| // Other Constants | ||||
| const ( | ||||
| 	LogLevel                   = 4 | ||||
| 	DatastoreProperty          = "datastore" | ||||
| 	ResourcePoolProperty       = "resourcePool" | ||||
| 	DatastoreInfoProperty      = "info" | ||||
| 	VirtualMachineType         = "VirtualMachine" | ||||
| 	RoundTripperDefaultCount   = 3 | ||||
| 	VSANDatastoreType          = "vsan" | ||||
| 	DummyVMPrefixName          = "vsphere-k8s" | ||||
| 	ActivePowerState           = "poweredOn" | ||||
| 	DatacenterType             = "Datacenter" | ||||
| 	ClusterComputeResourceType = "ClusterComputeResource" | ||||
| 	HostSystemType             = "HostSystem" | ||||
| 	NameProperty               = "name" | ||||
| 	MinvCenterVersion          = "7.0.2" | ||||
| ) | ||||
|  | ||||
| // Test Constants | ||||
| const ( | ||||
| 	TestDefaultDatacenter = "DC0" | ||||
| 	TestDefaultDatastore  = "LocalDS_0" | ||||
| 	TestDefaultNetwork    = "VM Network" | ||||
| 	testNameNotFound      = "enoent" | ||||
| ) | ||||
| @@ -1,37 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import "errors" | ||||
|  | ||||
| // Error Messages | ||||
| const ( | ||||
| 	FileAlreadyExistErrMsg = "File requested already exists" | ||||
| 	NoDiskUUIDFoundErrMsg  = "No disk UUID found" | ||||
| 	NoDevicesFoundErrMsg   = "No devices found" | ||||
| 	DiskNotFoundErrMsg     = "No vSphere disk ID found" | ||||
| 	NoVMFoundErrMsg        = "No VM found" | ||||
| ) | ||||
|  | ||||
| // Error constants | ||||
| var ( | ||||
| 	ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg) | ||||
| 	ErrNoDiskUUIDFound  = errors.New(NoDiskUUIDFoundErrMsg) | ||||
| 	ErrNoDevicesFound   = errors.New(NoDevicesFoundErrMsg) | ||||
| 	ErrNoDiskIDFound    = errors.New(DiskNotFoundErrMsg) | ||||
| 	ErrNoVMFound        = errors.New(NoVMFoundErrMsg) | ||||
| ) | ||||
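|  | ||||
| // These are sentinel errors, so callers compare against them directly, for | ||||
| // example (illustrative): | ||||
| // | ||||
| //	vm, err := dc.GetVMByUUID(ctx, uuid) | ||||
| //	if err == ErrNoVMFound { | ||||
| //		// the VM does not exist in this datacenter; try the next one | ||||
| //	} | ||||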
| @@ -1,370 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/find" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/property" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // Datacenter extends the govmomi Datacenter object | ||||
| type Datacenter struct { | ||||
| 	*object.Datacenter | ||||
| } | ||||
|  | ||||
| // GetDatacenter returns the Datacenter object for the given datacenterPath. | ||||
| // If the datacenter is located in a folder, include the full path to the datacenter; otherwise just provide the datacenter name. | ||||
| func GetDatacenter(ctx context.Context, connection *VSphereConnection, datacenterPath string) (*Datacenter, error) { | ||||
| 	finder := find.NewFinder(connection.Client, false) | ||||
| 	datacenter, err := finder.Datacenter(ctx, datacenterPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to find the datacenter: %s. err: %+v", datacenterPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	dc := Datacenter{datacenter} | ||||
| 	return &dc, nil | ||||
| } | ||||
|  | ||||
| // GetAllDatacenter returns all the Datacenter objects in the connected vCenter. | ||||
| func GetAllDatacenter(ctx context.Context, connection *VSphereConnection) ([]*Datacenter, error) { | ||||
| 	var dc []*Datacenter | ||||
| 	finder := find.NewFinder(connection.Client, false) | ||||
| 	datacenters, err := finder.DatacenterList(ctx, "*") | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to find the datacenter. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	for _, datacenter := range datacenters { | ||||
| 		dc = append(dc, &(Datacenter{datacenter})) | ||||
| 	} | ||||
|  | ||||
| 	return dc, nil | ||||
| } | ||||
|  | ||||
| // GetVMByUUID gets the VM object from the given vmUUID | ||||
| func (dc *Datacenter) GetVMByUUID(ctx context.Context, vmUUID string) (*VirtualMachine, error) { | ||||
| 	s := object.NewSearchIndex(dc.Client()) | ||||
| 	vmUUID = strings.ToLower(strings.TrimSpace(vmUUID)) | ||||
| 	svm, err := s.FindByUuid(ctx, dc.Datacenter, vmUUID, true, nil) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to find VM by UUID. VM UUID: %s, err: %+v", vmUUID, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if svm == nil { | ||||
| 		klog.Errorf("Unable to find VM by UUID. VM UUID: %s", vmUUID) | ||||
| 		return nil, ErrNoVMFound | ||||
| 	} | ||||
| 	virtualMachine := VirtualMachine{object.NewVirtualMachine(dc.Client(), svm.Reference()), dc} | ||||
| 	return &virtualMachine, nil | ||||
| } | ||||
|  | ||||
| // GetHostByVMUUID returns a reference to the host that runs the VM with the given vmUUID. | ||||
| func (dc *Datacenter) GetHostByVMUUID(ctx context.Context, vmUUID string) (*types.ManagedObjectReference, error) { | ||||
| 	virtualMachine, err := dc.GetVMByUUID(ctx, vmUUID) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var vmMo mo.VirtualMachine | ||||
| 	pc := property.DefaultCollector(virtualMachine.Client()) | ||||
| 	err = pc.RetrieveOne(ctx, virtualMachine.Reference(), []string{"summary.runtime.host"}, &vmMo) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve VM runtime host, err: %v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	host := vmMo.Summary.Runtime.Host | ||||
| 	klog.Infof("%s host is %s", virtualMachine.Reference(), host) | ||||
| 	return host, nil | ||||
| } | ||||
|  | ||||
| // GetVMByPath gets the VM object from the given vmPath | ||||
| // vmPath should be the full path to the VM, not just the name. | ||||
| func (dc *Datacenter) GetVMByPath(ctx context.Context, vmPath string) (*VirtualMachine, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	vm, err := finder.VirtualMachine(ctx, vmPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to find VM by Path. VM Path: %s, err: %+v", vmPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	virtualMachine := VirtualMachine{vm, dc} | ||||
| 	return &virtualMachine, nil | ||||
| } | ||||
|  | ||||
| // GetAllDatastores gets the datastore URL to DatastoreInfo map for all the datastores in | ||||
| // the datacenter. | ||||
| func (dc *Datacenter) GetAllDatastores(ctx context.Context) (map[string]*DatastoreInfo, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	datastores, err := finder.DatastoreList(ctx, "*") | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get all the datastores. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var dsList []types.ManagedObjectReference | ||||
| 	for _, ds := range datastores { | ||||
| 		dsList = append(dsList, ds.Reference()) | ||||
| 	} | ||||
|  | ||||
| 	var dsMoList []mo.Datastore | ||||
| 	pc := property.DefaultCollector(dc.Client()) | ||||
| 	properties := []string{DatastoreInfoProperty} | ||||
| 	err = pc.Retrieve(ctx, dsList, properties, &dsMoList) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ | ||||
| 			" dsObjList: %+v, properties: %+v, err: %v", dsList, properties, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	dsURLInfoMap := make(map[string]*DatastoreInfo) | ||||
| 	for _, dsMo := range dsMoList { | ||||
| 		dsURLInfoMap[dsMo.Info.GetDatastoreInfo().Url] = &DatastoreInfo{ | ||||
| 			&Datastore{object.NewDatastore(dc.Client(), dsMo.Reference()), | ||||
| 				dc}, | ||||
| 			dsMo.Info.GetDatastoreInfo()} | ||||
| 	} | ||||
| 	klog.V(9).Infof("dsURLInfoMap : %+v", dsURLInfoMap) | ||||
| 	return dsURLInfoMap, nil | ||||
| } | ||||
|  | ||||
| // GetAllHosts returns references to all the hosts in this datacenter of the vCenter server. | ||||
| func (dc *Datacenter) GetAllHosts(ctx context.Context) ([]types.ManagedObjectReference, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	hostSystems, err := finder.HostSystemList(ctx, "*") | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get all hostSystems. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var hostMors []types.ManagedObjectReference | ||||
| 	for _, hs := range hostSystems { | ||||
| 		hostMors = append(hostMors, hs.Reference()) | ||||
| 	} | ||||
| 	return hostMors, nil | ||||
| } | ||||
|  | ||||
| // GetDatastoreByPath gets the Datastore object from the given vmDiskPath | ||||
| func (dc *Datacenter) GetDatastoreByPath(ctx context.Context, vmDiskPath string) (*Datastore, error) { | ||||
| 	datastorePathObj := new(object.DatastorePath) | ||||
| 	isSuccess := datastorePathObj.FromString(vmDiskPath) | ||||
| 	if !isSuccess { | ||||
| 		klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) | ||||
| 		return nil, errors.New("Failed to parse vmDiskPath") | ||||
| 	} | ||||
|  | ||||
| 	return dc.GetDatastoreByName(ctx, datastorePathObj.Datastore) | ||||
| } | ||||
|  | ||||
| // GetDatastoreByName gets the Datastore object for the given datastore name | ||||
| func (dc *Datacenter) GetDatastoreByName(ctx context.Context, name string) (*Datastore, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	ds, err := finder.Datastore(ctx, name) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	datastore := Datastore{ds, dc} | ||||
| 	return &datastore, nil | ||||
| } | ||||
|  | ||||
| // GetDatastoreInfoByName gets the Datastore object for the given datastore name | ||||
| func (dc *Datacenter) GetDatastoreInfoByName(ctx context.Context, name string) (*DatastoreInfo, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	ds, err := finder.Datastore(ctx, name) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed while searching for datastore: %s. err: %+v", name, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	datastore := Datastore{ds, dc} | ||||
| 	var dsMo mo.Datastore | ||||
| 	pc := property.DefaultCollector(dc.Client()) | ||||
| 	properties := []string{DatastoreInfoProperty} | ||||
| 	err = pc.RetrieveOne(ctx, ds.Reference(), properties, &dsMo) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get Datastore managed objects from datastore reference."+ | ||||
| 			" dsRef: %+v, err: %+v", ds.Reference(), err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(9).Infof("Result dsMo: %+v", dsMo) | ||||
| 	return &DatastoreInfo{Datastore: &datastore, Info: dsMo.Info.GetDatastoreInfo()}, nil | ||||
| } | ||||
|  | ||||
| // GetResourcePool gets the resource pool for the given path | ||||
| func (dc *Datacenter) GetResourcePool(ctx context.Context, resourcePoolPath string) (*object.ResourcePool, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	resourcePool, err := finder.ResourcePoolOrDefault(ctx, resourcePoolPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get the ResourcePool for path '%s'. err: %+v", resourcePoolPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return resourcePool, nil | ||||
| } | ||||
|  | ||||
| // GetFolderByPath gets the Folder Object from the given folder path | ||||
| // folderPath should be the full inventory path to the folder. | ||||
| func (dc *Datacenter) GetFolderByPath(ctx context.Context, folderPath string) (*Folder, error) { | ||||
| 	finder := getFinder(dc) | ||||
| 	vmFolder, err := finder.Folder(ctx, folderPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get the folder reference for %s. err: %+v", folderPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	folder := Folder{vmFolder, dc} | ||||
| 	return &folder, nil | ||||
| } | ||||
|  | ||||
| // GetVMMoList gets the VM Managed Objects with the given properties from the VM objects. | ||||
| func (dc *Datacenter) GetVMMoList(ctx context.Context, vmObjList []*VirtualMachine, properties []string) ([]mo.VirtualMachine, error) { | ||||
| 	var vmMoList []mo.VirtualMachine | ||||
| 	var vmRefs []types.ManagedObjectReference | ||||
| 	if len(vmObjList) < 1 { | ||||
| 		klog.Errorf("VirtualMachine Object list is empty") | ||||
| 		return nil, fmt.Errorf("VirtualMachine Object list is empty") | ||||
| 	} | ||||
|  | ||||
| 	for _, vmObj := range vmObjList { | ||||
| 		vmRefs = append(vmRefs, vmObj.Reference()) | ||||
| 	} | ||||
| 	pc := property.DefaultCollector(dc.Client()) | ||||
| 	err := pc.Retrieve(ctx, vmRefs, properties, &vmMoList) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get VM managed objects from VM objects. vmObjList: %+v, properties: %+v, err: %v", vmObjList, properties, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return vmMoList, nil | ||||
| } | ||||
|  | ||||
| // GetVirtualDiskPage83Data gets the virtual disk UUID by diskPath | ||||
| func (dc *Datacenter) GetVirtualDiskPage83Data(ctx context.Context, diskPath string) (string, error) { | ||||
| 	if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" { | ||||
| 		diskPath += ".vmdk" | ||||
| 	} | ||||
| 	vdm := object.NewVirtualDiskManager(dc.Client()) | ||||
| 	// Returns uuid of vmdk virtual disk | ||||
| 	diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc.Datacenter) | ||||
|  | ||||
| 	if err != nil { | ||||
| 		klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	diskUUID = formatVirtualDiskUUID(diskUUID) | ||||
| 	return diskUUID, nil | ||||
| } | ||||
|  | ||||
| // GetDatastoreMoList gets the Datastore Managed Objects with the given properties from the datastore objects | ||||
| func (dc *Datacenter) GetDatastoreMoList(ctx context.Context, dsObjList []*Datastore, properties []string) ([]mo.Datastore, error) { | ||||
| 	var dsMoList []mo.Datastore | ||||
| 	var dsRefs []types.ManagedObjectReference | ||||
| 	if len(dsObjList) < 1 { | ||||
| 		klog.Errorf("Datastore Object list is empty") | ||||
| 		return nil, fmt.Errorf("Datastore Object list is empty") | ||||
| 	} | ||||
|  | ||||
| 	for _, dsObj := range dsObjList { | ||||
| 		dsRefs = append(dsRefs, dsObj.Reference()) | ||||
| 	} | ||||
| 	pc := property.DefaultCollector(dc.Client()) | ||||
| 	err := pc.Retrieve(ctx, dsRefs, properties, &dsMoList) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get Datastore managed objects from datastore objects. dsObjList: %+v, properties: %+v, err: %v", dsObjList, properties, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return dsMoList, nil | ||||
| } | ||||
|  | ||||
| // CheckDisksAttached checks if the disks are attached to the nodes. | ||||
| // This is done by comparing the volume paths with the backing.FileName on the VM's virtual disk devices. | ||||
| func (dc *Datacenter) CheckDisksAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) { | ||||
| 	attached := make(map[string]map[string]bool) | ||||
| 	var vmList []*VirtualMachine | ||||
| 	for nodeName, volPaths := range nodeVolumes { | ||||
| 		for _, volPath := range volPaths { | ||||
| 			setNodeVolumeMap(attached, volPath, nodeName, false) | ||||
| 		} | ||||
| 		vm, err := dc.GetVMByPath(ctx, nodeName) | ||||
| 		if err != nil { | ||||
| 			if IsNotFound(err) { | ||||
| 				klog.Warningf("Node %q does not exist, vSphere CP will assume disks %v are not attached to it.", nodeName, volPaths) | ||||
| 			} | ||||
| 			continue | ||||
| 		} | ||||
| 		vmList = append(vmList, vm) | ||||
| 	} | ||||
| 	if len(vmList) == 0 { | ||||
| 		klog.V(2).Infof("vSphere CP will assume no disks are attached to any node.") | ||||
| 		return attached, nil | ||||
| 	} | ||||
| 	vmMoList, err := dc.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name"}) | ||||
| 	if err != nil { | ||||
| 		// When there is an error fetching instance information, | ||||
| 		// it is safer to return nil so that the volume information is left untouched. | ||||
| 		klog.Errorf("Failed to get VM Managed object for nodes: %+v. err: %+v", vmList, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	for _, vmMo := range vmMoList { | ||||
| 		if vmMo.Config == nil { | ||||
| 			klog.Errorf("Config is not available for VM: %q", vmMo.Name) | ||||
| 			continue | ||||
| 		} | ||||
| 		for nodeName, volPaths := range nodeVolumes { | ||||
| 			if nodeName == vmMo.Name { | ||||
| 				verifyVolumePathsForVM(vmMo, volPaths, attached) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return attached, nil | ||||
| } | ||||
|  | ||||
| // verifyVolumePathsForVM verifies whether the given volume paths (volPaths) are attached to the VM. | ||||
| func verifyVolumePathsForVM(vmMo mo.VirtualMachine, volPaths []string, nodeVolumeMap map[string]map[string]bool) { | ||||
| 	// Verify if the volume paths are present on the VM backing virtual disk devices | ||||
| 	for _, volPath := range volPaths { | ||||
| 		vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device) | ||||
| 		for _, device := range vmDevices { | ||||
| 			if vmDevices.TypeName(device) == "VirtualDisk" { | ||||
| 				virtualDevice := device.GetVirtualDevice() | ||||
| 				if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { | ||||
| 					if backing.FileName == volPath { | ||||
| 						setNodeVolumeMap(nodeVolumeMap, volPath, vmMo.Name, true) | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func setNodeVolumeMap( | ||||
| 	nodeVolumeMap map[string]map[string]bool, | ||||
| 	volumePath string, | ||||
| 	nodeName string, | ||||
| 	check bool) { | ||||
| 	volumeMap := nodeVolumeMap[nodeName] | ||||
| 	if volumeMap == nil { | ||||
| 		volumeMap = make(map[string]bool) | ||||
| 		nodeVolumeMap[nodeName] = volumeMap | ||||
| 	} | ||||
| 	volumeMap[volumePath] = check | ||||
| } | ||||
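|  | ||||
| // exampleCheckDisksAttached is an illustrative sketch (not part of the original provider) showing | ||||
| // how the two-level map returned by CheckDisksAttached is read: node name -> volume path -> attached. | ||||
| // The node name and volume path below are hypothetical. | ||||
| func exampleCheckDisksAttached(ctx context.Context, dc *Datacenter) { | ||||
| 	nodeVolumes := map[string][]string{ | ||||
| 		"kubernetes-node-1": {"[datastore1] kubevols/vol-1.vmdk"}, | ||||
| 	} | ||||
| 	attached, err := dc.CheckDisksAttached(ctx, nodeVolumes) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("CheckDisksAttached failed: %v", err) | ||||
| 		return | ||||
| 	} | ||||
| 	if attached["kubernetes-node-1"]["[datastore1] kubevols/vol-1.vmdk"] { | ||||
| 		klog.V(4).Infof("disk is attached to kubernetes-node-1") | ||||
| 	} | ||||
| } | ||||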
| @@ -1,179 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/simulator" | ||||
| ) | ||||
|  | ||||
| func TestDatacenter(t *testing.T) { | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	// vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc) | ||||
| 	model := simulator.VPX() | ||||
|  | ||||
| 	defer model.Remove() | ||||
| 	err := model.Create() | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	s := model.Service.NewServer() | ||||
| 	defer s.Close() | ||||
|  | ||||
| 	avm := simulator.Map.Any(VirtualMachineType).(*simulator.VirtualMachine) | ||||
|  | ||||
| 	c, err := govmomi.NewClient(ctx, s.URL, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vc := &VSphereConnection{Client: c.Client} | ||||
|  | ||||
| 	_, err = GetDatacenter(ctx, vc, testNameNotFound) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVMByUUID(ctx, testNameNotFound) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVMByUUID(ctx, avm.Summary.Config.Uuid) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVMByPath(ctx, testNameNotFound) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	vm, err := dc.GetVMByPath(ctx, TestDefaultDatacenter+"/vm/"+avm.Name) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetDatastoreByPath(ctx, testNameNotFound) // invalid format | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	invalidPath := object.DatastorePath{ | ||||
| 		Datastore: testNameNotFound, | ||||
| 		Path:      testNameNotFound, | ||||
| 	} | ||||
| 	_, err = dc.GetDatastoreByPath(ctx, invalidPath.String()) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetDatastoreByPath(ctx, avm.Summary.Config.VmPathName) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetDatastoreByName(ctx, testNameNotFound) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	ds, err := dc.GetDatastoreByName(ctx, TestDefaultDatastore) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetFolderByPath(ctx, testNameNotFound) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetFolderByPath(ctx, TestDefaultDatacenter+"/vm") | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVMMoList(ctx, nil, nil) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{testNameNotFound}) // invalid property | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"}) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	diskPath := ds.Path(avm.Name + "/disk1.vmdk") | ||||
|  | ||||
| 	_, err = dc.GetVirtualDiskPage83Data(ctx, diskPath+testNameNotFound) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetVirtualDiskPage83Data(ctx, diskPath) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetDatastoreMoList(ctx, nil, nil) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetDatastoreMoList(ctx, []*Datastore{ds}, []string{testNameNotFound}) // invalid property | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error") | ||||
| 	} | ||||
|  | ||||
| 	_, err = dc.GetDatastoreMoList(ctx, []*Datastore{ds}, []string{DatastoreInfoProperty}) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	nodeVolumes := map[string][]string{ | ||||
| 		avm.Name: {testNameNotFound, diskPath}, | ||||
| 	} | ||||
|  | ||||
| 	attached, err := dc.CheckDisksAttached(ctx, nodeVolumes) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	if attached[avm.Name][testNameNotFound] { | ||||
| 		t.Error("should not be attached") | ||||
| 	} | ||||
|  | ||||
| 	if !attached[avm.Name][diskPath] { | ||||
| 		t.Errorf("%s should be attached", diskPath) | ||||
| 	} | ||||
| } | ||||
| @@ -1,109 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/property" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/soap" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // Datastore extends the govmomi Datastore object | ||||
| type Datastore struct { | ||||
| 	*object.Datastore | ||||
| 	Datacenter *Datacenter | ||||
| } | ||||
|  | ||||
| // DatastoreInfo is a structure to store the Datastore and its Info. | ||||
| type DatastoreInfo struct { | ||||
| 	*Datastore | ||||
| 	Info *types.DatastoreInfo | ||||
| } | ||||
|  | ||||
| func (di DatastoreInfo) String() string { | ||||
| 	return fmt.Sprintf("Datastore: %+v, datastore URL: %s", di.Datastore, di.Info.Url) | ||||
| } | ||||
|  | ||||
| // CreateDirectory creates the directory at location specified by directoryPath. | ||||
| // If the intermediate level folders do not exist, and the parameter createParents is true, all the non-existent folders are created. | ||||
| // directoryPath must be in the format "[vsanDatastore] kubevols" | ||||
| func (ds *Datastore) CreateDirectory(ctx context.Context, directoryPath string, createParents bool) error { | ||||
| 	fileManager := object.NewFileManager(ds.Client()) | ||||
| 	err := fileManager.MakeDirectory(ctx, directoryPath, ds.Datacenter.Datacenter, createParents) | ||||
| 	if err != nil { | ||||
| 		if soap.IsSoapFault(err) { | ||||
| 			soapFault := soap.ToSoapFault(err) | ||||
| 			if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok { | ||||
| 				return ErrFileAlreadyExist | ||||
| 			} | ||||
| 		} | ||||
| 		return err | ||||
| 	} | ||||
| 	klog.V(LogLevel).Infof("Created dir with path as %+q", directoryPath) | ||||
| 	return nil | ||||
| } | ||||
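|  | ||||
| // exampleEnsureKubevolsDir is an illustrative sketch (not part of the original provider) of how | ||||
| // CreateDirectory is typically driven with a datastore path such as "[vsanDatastore] kubevols", | ||||
| // treating ErrFileAlreadyExist as success. The datastore name below is hypothetical. | ||||
| func exampleEnsureKubevolsDir(ctx context.Context, ds *Datastore) error { | ||||
| 	err := ds.CreateDirectory(ctx, "[vsanDatastore] kubevols", false) | ||||
| 	if err != nil && err != ErrFileAlreadyExist { | ||||
| 		klog.Errorf("failed to create the kubevols directory: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||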
|  | ||||
| // GetType returns the type of datastore | ||||
| func (ds *Datastore) GetType(ctx context.Context) (string, error) { | ||||
| 	var dsMo mo.Datastore | ||||
| 	pc := property.DefaultCollector(ds.Client()) | ||||
| 	err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"summary"}, &dsMo) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve datastore summary property. err: %v", err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return dsMo.Summary.Type, nil | ||||
| } | ||||
|  | ||||
| // IsCompatibleWithStoragePolicy returns true if the datastore is compatible with the given storage policy, and false otherwise. | ||||
| // For an incompatible datastore, the fault message is also returned. | ||||
| func (ds *Datastore) IsCompatibleWithStoragePolicy(ctx context.Context, storagePolicyID string) (bool, string, error) { | ||||
| 	pbmClient, err := NewPbmClient(ctx, ds.Client()) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get new PbmClient Object. err: %v", err) | ||||
| 		return false, "", err | ||||
| 	} | ||||
| 	return pbmClient.IsDatastoreCompatible(ctx, storagePolicyID, ds) | ||||
| } | ||||
|  | ||||
| // GetDatastoreHostMounts gets the hosts (as managed object references) on which the given datastore is mounted. | ||||
| func (ds *Datastore) GetDatastoreHostMounts(ctx context.Context) ([]types.ManagedObjectReference, error) { | ||||
| 	var dsMo mo.Datastore | ||||
| 	pc := property.DefaultCollector(ds.Client()) | ||||
| 	err := pc.RetrieveOne(ctx, ds.Datastore.Reference(), []string{"host"}, &dsMo) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve datastore host mount property. err: %v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	hosts := make([]types.ManagedObjectReference, len(dsMo.Host)) | ||||
| 	for i, dsHostMount := range dsMo.Host { | ||||
| 		hosts[i] = dsHostMount.Key | ||||
| 	} | ||||
| 	return hosts, nil | ||||
| } | ||||
|  | ||||
| // Exists returns whether the given file exists in this datastore | ||||
| func (ds *Datastore) Exists(ctx context.Context, file string) bool { | ||||
| 	_, err := ds.Datastore.Stat(ctx, file) | ||||
| 	return err == nil | ||||
| } | ||||
| @@ -1,91 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/simulator" | ||||
| ) | ||||
|  | ||||
| func TestDatastore(t *testing.T) { | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	// vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc) | ||||
| 	model := simulator.VPX() | ||||
|  | ||||
| 	defer model.Remove() | ||||
| 	err := model.Create() | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	s := model.Service.NewServer() | ||||
| 	defer s.Close() | ||||
|  | ||||
| 	c, err := govmomi.NewClient(ctx, s.URL, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vc := &VSphereConnection{Client: c.Client} | ||||
|  | ||||
| 	dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	all, err := dc.GetAllDatastores(ctx) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	for _, info := range all { | ||||
| 		ds := info.Datastore | ||||
| 		kind, cerr := ds.GetType(ctx) | ||||
| 		if cerr != nil { | ||||
| 			t.Error(cerr) | ||||
| 		} | ||||
| 		if kind == "" { | ||||
| 			t.Error("empty Datastore type") | ||||
| 		} | ||||
|  | ||||
| 		dir := object.DatastorePath{ | ||||
| 			Datastore: info.Info.Name, | ||||
| 			Path:      "kubevols", | ||||
| 		} | ||||
|  | ||||
| 		// TODO: test Datastore.IsCompatibleWithStoragePolicy (vcsim needs PBM support) | ||||
|  | ||||
| 		for _, fail := range []bool{false, true} { | ||||
| 			cerr = ds.CreateDirectory(ctx, dir.String(), false) | ||||
| 			if fail { | ||||
| 				if cerr != ErrFileAlreadyExist { | ||||
| 					t.Errorf("expected %s, got: %s", ErrFileAlreadyExist, cerr) | ||||
| 				} | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			if cerr != nil { | ||||
| 				t.Error(cerr) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| @@ -1,105 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package diskmanagers | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| // virtualDiskManager implements the VirtualDiskProvider interface for creating and deleting volumes using the VirtualDiskManager API. | ||||
| type virtualDiskManager struct { | ||||
| 	diskPath      string | ||||
| 	volumeOptions *vclib.VolumeOptions | ||||
| } | ||||
|  | ||||
| // Create implements the VirtualDiskProvider Create interface. | ||||
| // It contains the virtualDiskManager-based provisioning implementation. | ||||
| func (diskManager virtualDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) { | ||||
| 	if diskManager.volumeOptions.SCSIControllerType == "" { | ||||
| 		diskManager.volumeOptions.SCSIControllerType = vclib.LSILogicControllerType | ||||
| 	} | ||||
|  | ||||
| 	// Check for existing VMDK before attempting create. Because a name collision | ||||
| 	// is unlikely, "VMDK already exists" is likely from a previous attempt to | ||||
| 	// create this volume. | ||||
| 	if dsPath := vclib.GetPathFromVMDiskPath(diskManager.diskPath); datastore.Exists(ctx, dsPath) { | ||||
| 		klog.V(2).Infof("Create: VirtualDisk already exists, returning success. Name=%q", diskManager.diskPath) | ||||
| 		return diskManager.diskPath, nil | ||||
| 	} | ||||
|  | ||||
| 	// Create specification for new virtual disk | ||||
| 	diskFormat := vclib.DiskFormatValidType[diskManager.volumeOptions.DiskFormat] | ||||
| 	vmDiskSpec := &types.FileBackedVirtualDiskSpec{ | ||||
| 		VirtualDiskSpec: types.VirtualDiskSpec{ | ||||
| 			AdapterType: diskManager.volumeOptions.SCSIControllerType, | ||||
| 			DiskType:    diskFormat, | ||||
| 		}, | ||||
| 		CapacityKb: int64(diskManager.volumeOptions.CapacityKB), | ||||
| 	} | ||||
|  | ||||
| 	vdm := object.NewVirtualDiskManager(datastore.Client()) | ||||
| 	requestTime := time.Now() | ||||
| 	// Create virtual disk | ||||
| 	task, err := vdm.CreateVirtualDisk(ctx, diskManager.diskPath, datastore.Datacenter.Datacenter, vmDiskSpec) | ||||
| 	if err != nil { | ||||
| 		vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) | ||||
| 		klog.Errorf("Failed to create virtual disk: %s. err: %+v", diskManager.diskPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	taskInfo, err := task.WaitForResult(ctx, nil) | ||||
| 	vclib.RecordvSphereMetric(vclib.APICreateVolume, requestTime, err) | ||||
| 	if err != nil { | ||||
| 		if isAlreadyExists(diskManager.diskPath, err) { | ||||
| 			// The disk already exists, log info message and return success | ||||
| 			klog.V(vclib.LogLevel).Infof("File: %v already exists", diskManager.diskPath) | ||||
| 			return diskManager.diskPath, nil | ||||
| 		} | ||||
| 		klog.Errorf("Failed to complete virtual disk creation: %s. err: %+v", diskManager.diskPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	canonicalDiskPath = taskInfo.Result.(string) | ||||
| 	return canonicalDiskPath, nil | ||||
| } | ||||
|  | ||||
| // Delete implements the VirtualDiskProvider Delete interface. | ||||
| func (diskManager virtualDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error { | ||||
| 	// Create a virtual disk manager | ||||
| 	virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client()) | ||||
| 	diskPath := vclib.RemoveStorageClusterORFolderNameFromVDiskPath(diskManager.diskPath) | ||||
| 	requestTime := time.Now() | ||||
| 	// Delete virtual disk | ||||
| 	task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter.Datacenter) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to delete virtual disk. err: %v", err) | ||||
| 		vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	err = task.Wait(ctx) | ||||
| 	vclib.RecordvSphereMetric(vclib.APIDeleteVolume, requestTime, err) | ||||
| 	if err != nil && !types.IsFileNotFound(err) { | ||||
| 		klog.Errorf("Failed to delete virtual disk. err: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| @@ -1,80 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package diskmanagers | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| // VirtualDisk holds the information required for virtual disk management operations. | ||||
| type VirtualDisk struct { | ||||
| 	DiskPath      string | ||||
| 	VolumeOptions *vclib.VolumeOptions | ||||
| 	VMOptions     *vclib.VMOptions | ||||
| } | ||||
|  | ||||
| // VirtualDisk Operations Const | ||||
| const ( | ||||
| 	VirtualDiskCreateOperation = "Create" | ||||
| 	VirtualDiskDeleteOperation = "Delete" | ||||
| ) | ||||
|  | ||||
| // VirtualDiskProvider defines interfaces for creating disk | ||||
| type VirtualDiskProvider interface { | ||||
| 	Create(ctx context.Context, datastore *vclib.Datastore) (string, error) | ||||
| 	Delete(ctx context.Context, datacenter *vclib.Datacenter) error | ||||
| } | ||||
|  | ||||
| // getDiskManager returns a vmDiskManager or virtualDiskManager based on the given volume options. | ||||
| func getDiskManager(disk *VirtualDisk, diskOperation string) VirtualDiskProvider { | ||||
| 	var diskProvider VirtualDiskProvider | ||||
| 	switch diskOperation { | ||||
| 	case VirtualDiskDeleteOperation: | ||||
| 		diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions} | ||||
| 	case VirtualDiskCreateOperation: | ||||
| 		if disk.VolumeOptions.StoragePolicyName != "" || disk.VolumeOptions.VSANStorageProfileData != "" || disk.VolumeOptions.StoragePolicyID != "" { | ||||
| 			diskProvider = vmDiskManager{disk.DiskPath, disk.VolumeOptions, disk.VMOptions} | ||||
| 		} else { | ||||
| 			diskProvider = virtualDiskManager{disk.DiskPath, disk.VolumeOptions} | ||||
| 		} | ||||
| 	} | ||||
| 	return diskProvider | ||||
| } | ||||
|  | ||||
| // Create gets the appropriate disk manager and calls its Create method. | ||||
| func (virtualDisk *VirtualDisk) Create(ctx context.Context, datastore *vclib.Datastore) (string, error) { | ||||
| 	if virtualDisk.VolumeOptions.DiskFormat == "" { | ||||
| 		virtualDisk.VolumeOptions.DiskFormat = vclib.ThinDiskType | ||||
| 	} | ||||
| 	if err := virtualDisk.VolumeOptions.VerifyVolumeOptions(); err != nil { | ||||
| 		klog.Errorf("VolumeOptions verification failed: %s (options: %+v)", err, virtualDisk.VolumeOptions) | ||||
| 		return "", fmt.Errorf("validation of parameters failed: %s", err) | ||||
| 	} | ||||
| 	if virtualDisk.VolumeOptions.StoragePolicyID != "" && virtualDisk.VolumeOptions.StoragePolicyName != "" { | ||||
| 		return "", fmt.Errorf("Storage Policy ID and Storage Policy Name both set, Please set only one parameter") | ||||
| 	} | ||||
| 	return getDiskManager(virtualDisk, VirtualDiskCreateOperation).Create(ctx, datastore) | ||||
| } | ||||
|  | ||||
| // Delete gets the appropriate disk manager and calls its Delete method. | ||||
| func (virtualDisk *VirtualDisk) Delete(ctx context.Context, datacenter *vclib.Datacenter) error { | ||||
| 	return getDiskManager(virtualDisk, VirtualDiskDeleteOperation).Delete(ctx, datacenter) | ||||
| } | ||||
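|  | ||||
| // exampleCreateVirtualDisk is an illustrative sketch (not part of the original provider) of how the | ||||
| // VirtualDisk wrapper is typically driven: the volume options decide, via getDiskManager, whether the | ||||
| // plain virtualDiskManager or the policy-aware vmDiskManager performs the create. The disk path and | ||||
| // capacity below are hypothetical values. | ||||
| func exampleCreateVirtualDisk(ctx context.Context, datastore *vclib.Datastore) { | ||||
| 	disk := VirtualDisk{ | ||||
| 		DiskPath: "[datastore1] kubevols/vol-1.vmdk", | ||||
| 		VolumeOptions: &vclib.VolumeOptions{ | ||||
| 			CapacityKB: 1024 * 1024, // 1 GiB, hypothetical | ||||
| 			DiskFormat: vclib.ThinDiskType, | ||||
| 		}, | ||||
| 	} | ||||
| 	canonicalPath, err := disk.Create(ctx, datastore) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("virtual disk creation failed: %v", err) | ||||
| 		return | ||||
| 	} | ||||
| 	klog.V(4).Infof("created virtual disk at %s", canonicalPath) | ||||
| } | ||||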
| @@ -1,254 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package diskmanagers | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"hash/fnv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
|  | ||||
| 	"k8s.io/klog/v2" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| ) | ||||
|  | ||||
| // vmDiskManager implements the VirtualDiskProvider interface for creating volumes using the virtual machine reconfigure approach. | ||||
| type vmDiskManager struct { | ||||
| 	diskPath      string | ||||
| 	volumeOptions *vclib.VolumeOptions | ||||
| 	vmOptions     *vclib.VMOptions | ||||
| } | ||||
|  | ||||
| // Create implements the VirtualDiskProvider Create interface. | ||||
| // It contains the VM-based provisioning implementation, which provisions the disk with an SPBM policy or VSANStorageProfileData. | ||||
| func (vmdisk vmDiskManager) Create(ctx context.Context, datastore *vclib.Datastore) (canonicalDiskPath string, err error) { | ||||
| 	if vmdisk.volumeOptions.SCSIControllerType == "" { | ||||
| 		vmdisk.volumeOptions.SCSIControllerType = vclib.PVSCSIControllerType | ||||
| 	} | ||||
| 	pbmClient, err := vclib.NewPbmClient(ctx, datastore.Client()) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("error occurred while creating new pbmClient, err: %+v", err) | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	if vmdisk.volumeOptions.StoragePolicyID == "" && vmdisk.volumeOptions.StoragePolicyName != "" { | ||||
| 		vmdisk.volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, vmdisk.volumeOptions.StoragePolicyName) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("error occurred while getting Profile Id from Profile Name: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyName, err) | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
| 	if vmdisk.volumeOptions.StoragePolicyID != "" { | ||||
| 		compatible, faultMessage, err := datastore.IsCompatibleWithStoragePolicy(ctx, vmdisk.volumeOptions.StoragePolicyID) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("error occurred while checking datastore compatibility with storage policy id: %s, err: %+v", vmdisk.volumeOptions.StoragePolicyID, err) | ||||
| 			return "", err | ||||
| 		} | ||||
|  | ||||
| 		if !compatible { | ||||
| 			klog.Errorf("Datastore: %s is not compatible with Policy: %s", datastore.Name(), vmdisk.volumeOptions.StoragePolicyName) | ||||
| 			return "", fmt.Errorf("user specified datastore is not compatible with the storagePolicy: %q. Failed with faults: %+q", vmdisk.volumeOptions.StoragePolicyName, faultMessage) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{} | ||||
| 	// If the PBM storage policy ID is present, set the storage spec profile ID; | ||||
| 	// otherwise, set the raw VSAN policy string. | ||||
| 	if vmdisk.volumeOptions.StoragePolicyID != "" { | ||||
| 		storageProfileSpec.ProfileId = vmdisk.volumeOptions.StoragePolicyID | ||||
| 	} else if vmdisk.volumeOptions.VSANStorageProfileData != "" { | ||||
| 		// Check Datastore type - VSANStorageProfileData is only applicable to vSAN Datastore | ||||
| 		dsType, err := datastore.GetType(ctx) | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 		if dsType != vclib.VSANDatastoreType { | ||||
| 			klog.Errorf("The specified datastore: %q is not a VSAN datastore", datastore.Name()) | ||||
| 			return "", fmt.Errorf("the specified datastore: %q is not a VSAN datastore."+ | ||||
| 				" the policy parameters will work only with VSAN Datastore."+ | ||||
| 				" so, please specify a valid VSAN datastore in Storage class definition", datastore.Name()) | ||||
| 		} | ||||
| 		storageProfileSpec.ProfileId = "" | ||||
| 		storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{ | ||||
| 			ExtensionKey: "com.vmware.vim.sps", | ||||
| 			ObjectData:   vmdisk.volumeOptions.VSANStorageProfileData, | ||||
| 		} | ||||
| 	} else { | ||||
| 		klog.Errorf("Both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") | ||||
| 		return "", fmt.Errorf("both volumeOptions.StoragePolicyID and volumeOptions.VSANStorageProfileData are not set. One of them should be set") | ||||
| 	} | ||||
| 	var dummyVM *vclib.VirtualMachine | ||||
| 	// Check if the VM already exists in the folder. | ||||
| 	// If the VM is already present, use it; otherwise create a new dummy VM. | ||||
| 	fnvHash := fnv.New32a() | ||||
| 	fnvHash.Write([]byte(vmdisk.volumeOptions.Name)) | ||||
| 	dummyVMFullName := vclib.DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) | ||||
| 	dummyVM, err = datastore.Datacenter.GetVMByPath(ctx, vmdisk.vmOptions.VMFolder.InventoryPath+"/"+dummyVMFullName) | ||||
| 	if err != nil { | ||||
| 		// Create a dummy VM | ||||
| 		klog.V(1).Infof("Creating Dummy VM: %q", dummyVMFullName) | ||||
| 		dummyVM, err = vmdisk.createDummyVM(ctx, datastore.Datacenter, dummyVMFullName) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("failed to create Dummy VM. err: %v", err) | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Reconfigure the VM to attach the disk with the VSAN policy configured | ||||
| 	virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} | ||||
| 	disk, _, err := dummyVM.CreateDiskSpec(ctx, vmdisk.diskPath, datastore, vmdisk.volumeOptions) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("failed to create Disk Spec. err: %v", err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	deviceConfigSpec := &types.VirtualDeviceConfigSpec{ | ||||
| 		Device:        disk, | ||||
| 		Operation:     types.VirtualDeviceConfigSpecOperationAdd, | ||||
| 		FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate, | ||||
| 	} | ||||
|  | ||||
| 	deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec) | ||||
| 	virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec) | ||||
| 	fileAlreadyExist := false | ||||
| 	task, err := dummyVM.Reconfigure(ctx, virtualMachineConfigSpec) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to reconfig. err: %v", err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	err = task.Wait(ctx) | ||||
| 	if err != nil { | ||||
| 		fileAlreadyExist = isAlreadyExists(vmdisk.diskPath, err) | ||||
| 		if fileAlreadyExist { | ||||
| 			// Skip error and continue to detach the disk as the disk was already created on the datastore. | ||||
| 			klog.V(vclib.LogLevel).Infof("File: %v already exists", vmdisk.diskPath) | ||||
| 		} else { | ||||
| 			klog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
| 	// Detach the disk from the dummy VM. | ||||
| 	err = dummyVM.DetachDisk(ctx, vmdisk.diskPath) | ||||
| 	if err != nil { | ||||
| 		if vclib.DiskNotFoundErrMsg == err.Error() && fileAlreadyExist { | ||||
| 			// Skip error if disk was already detached from the dummy VM but still present on the datastore. | ||||
| 			klog.V(vclib.LogLevel).Infof("File: %v is already detached", vmdisk.diskPath) | ||||
| 		} else { | ||||
| 			klog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmdisk.diskPath, dummyVMFullName, err) | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
| 	//  Delete the dummy VM | ||||
| 	err = dummyVM.DeleteVM(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) | ||||
| 	} | ||||
| 	return vmdisk.diskPath, nil | ||||
| } | ||||
|  | ||||
| func (vmdisk vmDiskManager) Delete(ctx context.Context, datacenter *vclib.Datacenter) error { | ||||
| 	return fmt.Errorf("vmDiskManager.Delete is not supported") | ||||
| } | ||||
|  | ||||
| // createDummyVM creates a dummy VM at the specified location with the given name. | ||||
| func (vmdisk vmDiskManager) createDummyVM(ctx context.Context, datacenter *vclib.Datacenter, vmName string) (*vclib.VirtualMachine, error) { | ||||
| 	// Create a virtual machine config spec with 1 SCSI adapter. | ||||
| 	virtualMachineConfigSpec := types.VirtualMachineConfigSpec{ | ||||
| 		Name: vmName, | ||||
| 		Files: &types.VirtualMachineFileInfo{ | ||||
| 			VmPathName: "[" + vmdisk.volumeOptions.Datastore + "]", | ||||
| 		}, | ||||
| 		NumCPUs:  1, | ||||
| 		MemoryMB: 4, | ||||
| 		DeviceChange: []types.BaseVirtualDeviceConfigSpec{ | ||||
| 			&types.VirtualDeviceConfigSpec{ | ||||
| 				Operation: types.VirtualDeviceConfigSpecOperationAdd, | ||||
| 				Device: &types.ParaVirtualSCSIController{ | ||||
| 					VirtualSCSIController: types.VirtualSCSIController{ | ||||
| 						SharedBus: types.VirtualSCSISharingNoSharing, | ||||
| 						VirtualController: types.VirtualController{ | ||||
| 							BusNumber: 0, | ||||
| 							VirtualDevice: types.VirtualDevice{ | ||||
| 								Key: 1000, | ||||
| 							}, | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	task, err := vmdisk.vmOptions.VMFolder.CreateVM(ctx, virtualMachineConfigSpec, vmdisk.vmOptions.VMResourcePool, nil) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create VM. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	dummyVMTaskInfo, err := task.WaitForResult(ctx, nil) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while waiting for create VM task result. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	vmRef := dummyVMTaskInfo.Result.(object.Reference) | ||||
| 	dummyVM := object.NewVirtualMachine(datacenter.Client(), vmRef.Reference()) | ||||
| 	return &vclib.VirtualMachine{VirtualMachine: dummyVM, Datacenter: datacenter}, nil | ||||
| } | ||||
|  | ||||
| // CleanUpDummyVMs deletes stale dummy VMs. | ||||
| func CleanUpDummyVMs(ctx context.Context, folder *vclib.Folder) error { | ||||
| 	vmList, err := folder.GetVirtualMachines(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.V(4).Infof("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	if len(vmList) == 0 { | ||||
| 		klog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) | ||||
| 		return fmt.Errorf("no virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) | ||||
| 	} | ||||
| 	var dummyVMList []*vclib.VirtualMachine | ||||
| 	// Loop through the VMs in the Kubernetes cluster to find dummy VMs. | ||||
| 	for _, vm := range vmList { | ||||
| 		vmName, err := vm.ObjectName(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.V(4).Infof("Unable to get name from VM with err: %+v", err) | ||||
| 			continue | ||||
| 		} | ||||
| 		if strings.HasPrefix(vmName, vclib.DummyVMPrefixName) { | ||||
| 			vmObj := vclib.VirtualMachine{VirtualMachine: object.NewVirtualMachine(folder.Client(), vm.Reference())} | ||||
| 			dummyVMList = append(dummyVMList, &vmObj) | ||||
| 		} | ||||
| 	} | ||||
| 	for _, vm := range dummyVMList { | ||||
| 		err = vm.DeleteVM(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.V(4).Infof("Unable to delete dummy VM with err: %+v", err) | ||||
| 			continue | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func isAlreadyExists(path string, err error) bool { | ||||
| 	errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", path) | ||||
| 	return err.Error() == errorMessage | ||||
| } | ||||
| @@ -1,47 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // Folder extends the govmomi Folder object | ||||
| type Folder struct { | ||||
| 	*object.Folder | ||||
| 	Datacenter *Datacenter | ||||
| } | ||||
|  | ||||
| // GetVirtualMachines returns the list of VirtualMachines inside the folder. | ||||
| func (folder *Folder) GetVirtualMachines(ctx context.Context) ([]*VirtualMachine, error) { | ||||
| 	vmFolders, err := folder.Children(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get children from Folder: %s. err: %+v", folder.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var vmObjList []*VirtualMachine | ||||
| 	for _, vmFolder := range vmFolders { | ||||
| 		if vmFolder.Reference().Type == VirtualMachineType { | ||||
| 			vmObj := VirtualMachine{object.NewVirtualMachine(folder.Client(), vmFolder.Reference()), folder.Datacenter} | ||||
| 			vmObjList = append(vmObjList, &vmObj) | ||||
| 		} | ||||
| 	} | ||||
| 	return vmObjList, nil | ||||
| } | ||||
| @@ -1,83 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"path" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/simulator" | ||||
| ) | ||||
|  | ||||
| func TestFolder(t *testing.T) { | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	model := simulator.VPX() | ||||
| 	// Child folder "F0" will be created under the root folder and datacenter folders, | ||||
| 	// and all resources are created within the "F0" child folders. | ||||
| 	model.Folder = 1 | ||||
|  | ||||
| 	defer model.Remove() | ||||
| 	err := model.Create() | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	s := model.Service.NewServer() | ||||
| 	defer s.Close() | ||||
|  | ||||
| 	c, err := govmomi.NewClient(ctx, s.URL, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vc := &VSphereConnection{Client: c.Client} | ||||
|  | ||||
| 	dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	const folderName = "F0" | ||||
| 	vmFolder := path.Join("/", folderName, dc.Name(), "vm") | ||||
|  | ||||
| 	tests := []struct { | ||||
| 		folderPath string | ||||
| 		expect     int | ||||
| 	}{ | ||||
| 		{vmFolder, 0}, | ||||
| 		{path.Join(vmFolder, folderName), (model.Host + model.Cluster) * model.Machine}, | ||||
| 	} | ||||
|  | ||||
| 	for i, test := range tests { | ||||
| 		folder, cerr := dc.GetFolderByPath(ctx, test.folderPath) | ||||
| 		if cerr != nil { | ||||
| 			t.Fatal(cerr) | ||||
| 		} | ||||
|  | ||||
| 		vms, cerr := folder.GetVirtualMachines(ctx) | ||||
| 		if cerr != nil { | ||||
| 			t.Fatalf("%d: %s", i, cerr) | ||||
| 		} | ||||
|  | ||||
| 		if len(vms) != test.expect { | ||||
| 			t.Errorf("%d: expected %d VMs, got: %d", i, test.expect, len(vms)) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| @@ -1,169 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/pbm" | ||||
| 	"k8s.io/klog/v2" | ||||
|  | ||||
| 	pbmtypes "github.com/vmware/govmomi/pbm/types" | ||||
| 	"github.com/vmware/govmomi/vim25" | ||||
| ) | ||||
|  | ||||
| // PbmClient extends the govmomi pbm client and provides functions to get the list of datastores compatible with a given storage policy. | ||||
| type PbmClient struct { | ||||
| 	*pbm.Client | ||||
| } | ||||
|  | ||||
| // NewPbmClient returns a new PBM Client object | ||||
| func NewPbmClient(ctx context.Context, client *vim25.Client) (*PbmClient, error) { | ||||
| 	pbmClient, err := pbm.NewClient(ctx, client) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create new Pbm Client. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &PbmClient{pbmClient}, nil | ||||
| } | ||||
|  | ||||
| // IsDatastoreCompatible checks if the datastore is compatible with the given storage policy ID. | ||||
| // If the datastore is not compatible with the policy, a fault message with the datastore name is returned. | ||||
| func (pbmClient *PbmClient) IsDatastoreCompatible(ctx context.Context, storagePolicyID string, datastore *Datastore) (bool, string, error) { | ||||
| 	faultMessage := "" | ||||
| 	placementHub := pbmtypes.PbmPlacementHub{ | ||||
| 		HubType: datastore.Reference().Type, | ||||
| 		HubId:   datastore.Reference().Value, | ||||
| 	} | ||||
| 	hubs := []pbmtypes.PbmPlacementHub{placementHub} | ||||
| 	req := []pbmtypes.BasePbmPlacementRequirement{ | ||||
| 		&pbmtypes.PbmPlacementCapabilityProfileRequirement{ | ||||
| 			ProfileId: pbmtypes.PbmProfileId{ | ||||
| 				UniqueId: storagePolicyID, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	compatibilityResult, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred for CheckRequirements call. err %+v", err) | ||||
| 		return false, "", err | ||||
| 	} | ||||
| 	if len(compatibilityResult) > 0 { | ||||
| 		compatibleHubs := compatibilityResult.CompatibleDatastores() | ||||
| 		if len(compatibleHubs) > 0 { | ||||
| 			return true, "", nil | ||||
| 		} | ||||
| 		dsName, err := datastore.ObjectName(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed to get datastore ObjectName") | ||||
| 			return false, "", err | ||||
| 		} | ||||
| 		if compatibilityResult[0].Error[0].LocalizedMessage == "" { | ||||
| 			faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy." | ||||
| 		} else { | ||||
| 			faultMessage = "Datastore: " + dsName + " is not compatible with the storage policy. LocalizedMessage: " + compatibilityResult[0].Error[0].LocalizedMessage + "\n" | ||||
| 		} | ||||
| 		return false, faultMessage, nil | ||||
| 	} | ||||
| 	return false, "", fmt.Errorf("compatibilityResult is nil or empty") | ||||
| } | ||||
|  | ||||
| // GetCompatibleDatastores filters and returns the list of datastores compatible with the given storage policy ID. | ||||
| // For non-compatible datastores, fault messages with the datastore names are also returned. | ||||
| func (pbmClient *PbmClient) GetCompatibleDatastores(ctx context.Context, storagePolicyID string, datastores []*DatastoreInfo) ([]*DatastoreInfo, string, error) { | ||||
| 	var ( | ||||
| 		dsMorNameMap                                = getDsMorNameMap(ctx, datastores) | ||||
| 		localizedMessagesForNotCompatibleDatastores = "" | ||||
| 	) | ||||
| 	compatibilityResult, err := pbmClient.GetPlacementCompatibilityResult(ctx, storagePolicyID, datastores) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while retrieving placement compatibility result for datastores: %+v with storagePolicyID: %s. err: %+v", datastores, storagePolicyID, err) | ||||
| 		return nil, "", err | ||||
| 	} | ||||
| 	compatibleHubs := compatibilityResult.CompatibleDatastores() | ||||
| 	var compatibleDatastoreList []*DatastoreInfo | ||||
| 	for _, hub := range compatibleHubs { | ||||
| 		compatibleDatastoreList = append(compatibleDatastoreList, getDatastoreFromPlacementHub(datastores, hub)) | ||||
| 	} | ||||
| 	for _, res := range compatibilityResult { | ||||
| 		for _, err := range res.Error { | ||||
| 			dsName := dsMorNameMap[res.Hub.HubId] | ||||
| 			localizedMessage := "" | ||||
| 			if err.LocalizedMessage != "" { | ||||
| 				localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. LocalizedMessage: " + err.LocalizedMessage + "\n" | ||||
| 			} else { | ||||
| 				localizedMessage = "Datastore: " + dsName + " not compatible with the storage policy. \n" | ||||
| 			} | ||||
| 			localizedMessagesForNotCompatibleDatastores += localizedMessage | ||||
| 		} | ||||
| 	} | ||||
| 	// Return an error if there are no compatible datastores. | ||||
| 	if len(compatibleHubs) < 1 { | ||||
| 		klog.Errorf("No compatible datastores found that satisfy the storage policy requirements: %s", storagePolicyID) | ||||
| 		return nil, localizedMessagesForNotCompatibleDatastores, fmt.Errorf("No compatible datastores found that satisfy the storage policy requirements") | ||||
| 	} | ||||
| 	return compatibleDatastoreList, localizedMessagesForNotCompatibleDatastores, nil | ||||
| } | ||||
|  | ||||
| // GetPlacementCompatibilityResult gets placement compatibility result based on storage policy requirements. | ||||
| func (pbmClient *PbmClient) GetPlacementCompatibilityResult(ctx context.Context, storagePolicyID string, datastore []*DatastoreInfo) (pbm.PlacementCompatibilityResult, error) { | ||||
| 	var hubs []pbmtypes.PbmPlacementHub | ||||
| 	for _, ds := range datastore { | ||||
| 		hubs = append(hubs, pbmtypes.PbmPlacementHub{ | ||||
| 			HubType: ds.Reference().Type, | ||||
| 			HubId:   ds.Reference().Value, | ||||
| 		}) | ||||
| 	} | ||||
| 	req := []pbmtypes.BasePbmPlacementRequirement{ | ||||
| 		&pbmtypes.PbmPlacementCapabilityProfileRequirement{ | ||||
| 			ProfileId: pbmtypes.PbmProfileId{ | ||||
| 				UniqueId: storagePolicyID, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred for CheckRequirements call. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return res, nil | ||||
| } | ||||
|  | ||||
| // getDatastoreFromPlacementHub returns the matching datastore associated with the given pbmPlacementHub. | ||||
| func getDatastoreFromPlacementHub(datastore []*DatastoreInfo, pbmPlacementHub pbmtypes.PbmPlacementHub) *DatastoreInfo { | ||||
| 	for _, ds := range datastore { | ||||
| 		if ds.Reference().Type == pbmPlacementHub.HubType && ds.Reference().Value == pbmPlacementHub.HubId { | ||||
| 			return ds | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // getDsMorNameMap returns a map from datastore managed object reference (MoRef) value to datastore object name | ||||
| func getDsMorNameMap(ctx context.Context, datastores []*DatastoreInfo) map[string]string { | ||||
| 	dsMorNameMap := make(map[string]string) | ||||
| 	for _, ds := range datastores { | ||||
| 		dsObjectName, err := ds.ObjectName(ctx) | ||||
| 		if err == nil { | ||||
| 			dsMorNameMap[ds.Reference().Value] = dsObjectName | ||||
| 		} else { | ||||
| 			klog.Errorf("Error occurred while getting datastore object name. err: %+v", err) | ||||
| 		} | ||||
| 	} | ||||
| 	return dsMorNameMap | ||||
| } | ||||
| @@ -1 +0,0 @@ | ||||
| Keys in this directory are generated for testing purposes only. | ||||
| @@ -1,51 +0,0 @@ | ||||
| -----BEGIN RSA PRIVATE KEY----- | ||||
| MIIJJwIBAAKCAgEA4CKLwCPwMUIVaGhvZxLmXEzDflILVaGCZRRBbfYucfysylT/ | ||||
| JKPMlKs3ORNVW1cdiW1z/ZUlAlN+eqq40WSVQJqLUeXltsfZwemdFmf3SAWIu9v9 | ||||
| wI5mhLQJMh2XPKNILCBhrET/ANLVPbObJUFvGavpR9XVXTXsLUvuCR+oSpDvQYyn | ||||
| WKJ5dAwqKaFx3GCEFAm0dNnSzliQrzKFOE0DUMxFQH5Lt2EYLHrya+K4ZtYbX5nK | ||||
| X++T9R5pZs0npqmTQS/rIffv2hT89tKdqPz/MCt5xwmjsAO2uri5O+WaLUIkf8Bd | ||||
| fmVAusE/5v2p3x3MH0rUXaNPg7FqLj1cnbcwHqqt3PmVl9VZINkPbnHHiua21GNq | ||||
| DAZ/G/vP8/hlXwIeE8d6YPsSPm4NEH0Ku+yk0TEL6QkGFMYYpyCw1BNYGXd+zvf1 | ||||
| xjZtGrcViHhesxuv71nGdJbNSi7zwkYXydSKCNnjJ+Oqyip5uUC+DmydqcJTQLcZ | ||||
| W5ObNfeB8PLz6UuVidMffh8evE13L60cS5wZyZWherMqB+I/uREt05gikCtlJVOo | ||||
| shuLS0QjbK/INYCSFBJjt0xrwTbw13SQsEhktQYdqTHaDBWi6uh+pcY9msF1jZJ+ | ||||
| GAEPYcLzK3o2/kE6g09TZ3QDeP9bEDTllL+mIs4JGiWGNC/eGjGfyyAnfmECAwEA | ||||
| AQKCAf88aRNBtm4G2MjsWzmrjmyIdCg84+AqNF3w4ITCHphmILRx1HbwaTW63GsF | ||||
| 9zAKbnCHmfipYImZFugAKAOobHPN9dmXOV+w5CzNFyo/38XGo7c26xR50efP3Lad | ||||
| y1v3/Ap32kJ5LB+PGURgXQh0Ai7vvGYj9n6LoP0HOG/wBZhWgLn78O0p9qDFpoG2 | ||||
| tsz5mQoAXJ1G4W7wLu7QSc2eXyOFo4kG2QOPaZwaYQj2CyWokgzOt6TUNr6qUogW | ||||
| LTWCtjH6X/AAN9Nt9Do6TIoyAf7F/PHVs8NqrZWSvjcu7bOgfzNXO4H3j1LjAzM2 | ||||
| Dyi5+k4KISEcG+hSln8H94H/AGD3Yea44sDnIZoOtKTB+O7V+jyU7qwtX9QaEu04 | ||||
| CslnZOun0/PR/C9mI7QaGu1YJcxdIw9Nlj07+iAzI4ZjuO+qHeUM7SNvH/MVbglA | ||||
| 2ZDkp7J3VlJgFObvHdINZMWNO1FIg/pc7TcXNsUkNAwnCwLh6//5/cZF+BtGlc4A | ||||
| SGkhYKX3dRp8qLjNKxux3VHROoDByJDEUcnn0fEAm9aMbV+PofpghJtQqnKbsMn8 | ||||
| iF29v+9+JBIHFxAwhCIv9atF82VHt/sGPcsRqohttRWJDaUMBM3N8rvciiigcYzh | ||||
| c/o4kH0YNoFSs4+evhYQDxk8yIGsgyuGfnW5QaLUIPa2AxblAoIBAQDyfoJr3UFq | ||||
| LfkTkYHjAo4eua1vzlM3/8aFFnuQhMeoFvw4aA26x1DsUUozIRXTWWbnFH6GY8T3 | ||||
| B46jgWcO4UaMqbxQxTpHSDQFSVn/budugxGU70WQ9LcjSobk9uCXgk2MmRn9tA/6 | ||||
| +ergswSEuPxyNzgDF60BTrS5V2Akh6eF/sYZWnMKazZ3kdw1V5Y/IxfNH1yo6GRz | ||||
| PTPVyyX6kU3+DNQSplgcsKYFhyoT2HPIRaxR1fTIw9E5w1rQWanYz/A0I3SDECsc | ||||
| qJDy1rzC+0Tye2XLcWzHu5l1ng8GPLQJfjEtMTKXMIHjpLFC1P4hXNrlxTOnALSS | ||||
| 95bwzvDqfxULAoIBAQDsnkUVOvoXrE9xRI2EyWi1K08i5YSwy3rtV+uJP2Zyy4uB | ||||
| r3TfzxFnYdXWykzHJGqHd6N5M6vCmbcLMS0G9z5QpDhrIF5vk26P9isvZ3k7rkWG | ||||
| jgwif3kBcPQXlCDgwwnVmGsBf/A+2z3HOfNPK3Iy3VamFvYD52wgL8+N0puZ42aU | ||||
| aH759JjLGcaVZWzWNdIcpS1OsBucGXCj3IeHmLjhJFbVebIHJ4rCs7gj51H8R8uk | ||||
| fksxsgfPdRRpYq7NkDOzVDPb/KtTf5C4ZDogRaxj765DMnn6LhBFQVuDWEDJgjlF | ||||
| Aolt8ynskf3xd19nlX99QAzXnql6LLClwps6G8XDAoIBADzhslDufevwmuZk091w | ||||
| 2MmyCG9Xt+EJYIgtetxv2cjD7JMk3L2WKSULy7tGhTpI6eL+bD3FcsAqr48xf/Rm | ||||
| btYGD3ef7N/Uqurg3a2Z5JUEZzejUy3vosNDhNabfQvM9TdlgPcHbDOw511+1JWV | ||||
| 9Bug7XkpSpBXeFxIKaVCQbcMniPjZ5qoDEa84jKqSNiVMPaY9ySZJA8iwI7esCxW | ||||
| quQryFreVKTvXN9qbhAJehhAFeF9/DUjpLYB7Bz/RftfSYltlWUKfCh30dyGOWIi | ||||
| v865WHdZhNwop4C2LEN+nhz8B9C212LKFPJYeQC0hRFPRM4HUs6NCMkVTFotOqNF | ||||
| QL0CggEAGXBysPOkS8NEz0K1jF8zGLdNTM0sVO2ri7T2J81fMFxd5VV91Uon7tt/ | ||||
| 6BXb51Us9t+P/cnmX4ezPErPMn6GfpkJT8stHAXXzzaCMhiH2jjEVNEU0Oivk84X | ||||
| ECnm1wNhHUvDxWeB5uAfZjn+xLZBEuLlG/o//O92modJY1APVp4yOyZ48FqxyrQ8 | ||||
| u3cqGmWy701674jTjxbVG2jsUVHEHsCPbWgmEcrYilJUK9gE4oC9jjPd1bv0RwOp | ||||
| bCMl9Afa5x7YbIBf0xxV7N0puqqC/EOakrLslk85hJigRCDK5l9P1PGO4PlRupN/ | ||||
| n+Rbp4FVMZwfRVdTlUUUwN2JXtf5jQKCAQEAqSMv1mkLS3qnmW1E/qAYrEmMlHZo | ||||
| 253wuwsO0XS7xCxcEumIvjYCvhnHPYIO2rqsscmk42gYe/OUfteMb71BJ+HnlyOo | ||||
| 9oDbZg8W2DSUzTUy0yT/JMcNTwVCPeVj+bZ/LzDP5jKmZ7vXZkLGQCgU6ENVmsCg | ||||
| b8nKz0xc7o8jERaSGY+h3LthXF0wAZJ3NdbnJjFbL8hYpwTrD6xd/yg3M5grrCLe | ||||
| iBKfdpCIN6VrqI9VymoPZryb1OVEiClt0LHWTIXQPcH2J/CrMeWoGhRBW3yTAECf | ||||
| HPhYMZddW2y6uOFjRcUCu2HG35ogEYlDd0kjH1HhPC2xXcFQBmOyPpEeDQ== | ||||
| -----END RSA PRIVATE KEY----- | ||||
| @@ -1,29 +0,0 @@ | ||||
| -----BEGIN CERTIFICATE----- | ||||
| MIIE/jCCAuYCCQDRJ2qPhdmG0DANBgkqhkiG9w0BAQsFADBAMQswCQYDVQQGEwJV | ||||
| UzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4xDzANBgNVBAMMBnNv | ||||
| bWVDQTAgFw0xODA2MDgxMzM5MjFaGA8yMjE4MDQyMTEzMzkyMVowQDELMAkGA1UE | ||||
| BhMCVVMxCzAJBgNVBAgMAkNBMRMwEQYDVQQKDApBY21lLCBJbmMuMQ8wDQYDVQQD | ||||
| DAZzb21lQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDgIovAI/Ax | ||||
| QhVoaG9nEuZcTMN+UgtVoYJlFEFt9i5x/KzKVP8ko8yUqzc5E1VbVx2JbXP9lSUC | ||||
| U356qrjRZJVAmotR5eW2x9nB6Z0WZ/dIBYi72/3AjmaEtAkyHZc8o0gsIGGsRP8A | ||||
| 0tU9s5slQW8Zq+lH1dVdNewtS+4JH6hKkO9BjKdYonl0DCopoXHcYIQUCbR02dLO | ||||
| WJCvMoU4TQNQzEVAfku3YRgsevJr4rhm1htfmcpf75P1HmlmzSemqZNBL+sh9+/a | ||||
| FPz20p2o/P8wK3nHCaOwA7a6uLk75ZotQiR/wF1+ZUC6wT/m/anfHcwfStRdo0+D | ||||
| sWouPVydtzAeqq3c+ZWX1Vkg2Q9ucceK5rbUY2oMBn8b+8/z+GVfAh4Tx3pg+xI+ | ||||
| bg0QfQq77KTRMQvpCQYUxhinILDUE1gZd37O9/XGNm0atxWIeF6zG6/vWcZ0ls1K | ||||
| LvPCRhfJ1IoI2eMn46rKKnm5QL4ObJ2pwlNAtxlbk5s194Hw8vPpS5WJ0x9+Hx68 | ||||
| TXcvrRxLnBnJlaF6syoH4j+5ES3TmCKQK2UlU6iyG4tLRCNsr8g1gJIUEmO3TGvB | ||||
| NvDXdJCwSGS1Bh2pMdoMFaLq6H6lxj2awXWNkn4YAQ9hwvMrejb+QTqDT1NndAN4 | ||||
| /1sQNOWUv6YizgkaJYY0L94aMZ/LICd+YQIDAQABMA0GCSqGSIb3DQEBCwUAA4IC | ||||
| AQBYBRH/q3gB4gEiOAUl9HbnoUb7MznZ0uQTH7fUYqr66ceZkg9w1McbwiAeZAaY | ||||
| qQWwr3u4A8/Bg8csE2yQTsXeA33FP3Q6obyuYn4q7e++4+9SLkbSSQfbB67pGUK5 | ||||
| /pal6ULrLGzs69fbL1tOaA/VKQJndg3N9cftyiIUWTzHDop8SLmIobWVRtPQHf00 | ||||
| oKq8loakyluQdxQxnGdl7vMXwSpSpIH84TOdy2JN90MzVLgOz55sb/wRYfhClNFD | ||||
| +1sb2V4nL2w1kXaO2UVPzk7qpG5FE54JPvvN67Ec4JjMSnGo8l3dJ9jGEmgBIML3 | ||||
| l1onrti2HStSs1vR4Ax0xok08okRlrGA4FqQiSx853T5uLa/JLmWfLKg9ixR4ZV+ | ||||
| dF+2ZrFwDLZUr4VeaDd2v2mQFBNLvdZrqp1OZ4B/1+H5S8ucb+oVhGqzDkEvRCc+ | ||||
| WYpNxx7kpwZPTLmMYTXXKdTWfpgz9GL0LSkY8d1rxLwHxtV8EzAkV+zIWix4h/IE | ||||
| 0FG4WvhrttMCu8ulZhGGoVqy7gdb4+ViWnUYNuCCjIcRJj7SeZaDawBASa/jZwik | ||||
| Hxrwn0osGUqEUBmvjDdXJpTaKCr2GFOvhCM2pG6AXa14b5hS2DgbX+NZYcScYtVC | ||||
| vn2HMDjnIEF4uOfDJU5eLok4jli5+VwzOQ7hOHs3DIm4+g== | ||||
| -----END CERTIFICATE----- | ||||
| @@ -1,93 +0,0 @@ | ||||
| #!/usr/bin/env bash | ||||
|  | ||||
| # Copyright 2018 The Kubernetes Authors. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
| set -eu | ||||
|  | ||||
| readonly VALID_DAYS='73000' | ||||
| readonly RSA_KEY_SIZE='4096' | ||||
|  | ||||
| createKey() { | ||||
|   openssl genrsa \ | ||||
|     -out "$1" \ | ||||
|     "$RSA_KEY_SIZE" | ||||
| } | ||||
|  | ||||
| createCaCert() { | ||||
|   openssl req \ | ||||
|     -x509 \ | ||||
|     -subj "$( getSubj 'someCA' )" \ | ||||
|     -new \ | ||||
|     -nodes \ | ||||
|     -key "$2" \ | ||||
|     -sha256 \ | ||||
|     -days "$VALID_DAYS" \ | ||||
|     -out "$1" | ||||
| } | ||||
|  | ||||
| createCSR() { | ||||
|   openssl req \ | ||||
|     -new \ | ||||
|     -sha256 \ | ||||
|     -key "$2" \ | ||||
|     -subj "$( getSubj 'localhost' )" \ | ||||
|     -reqexts SAN \ | ||||
|     -config <( getSANConfig ) \ | ||||
|     -out "$1" | ||||
| } | ||||
|  | ||||
| signCSR() { | ||||
|   openssl x509 \ | ||||
|     -req \ | ||||
|     -in "$2" \ | ||||
|     -CA "$3" \ | ||||
|     -CAkey "$4" \ | ||||
|     -CAcreateserial \ | ||||
|     -days "$VALID_DAYS" \ | ||||
|     -sha256 \ | ||||
|     -extfile <( getSAN ) \ | ||||
|     -out "$1" | ||||
| } | ||||
|  | ||||
| getSubj() { | ||||
|   local cn="${1:-someRandomCN}" | ||||
|   echo "/C=US/ST=CA/O=Acme, Inc./CN=${cn}" | ||||
| } | ||||
|  | ||||
| getSAN() { | ||||
|   printf "subjectAltName=DNS:localhost,IP:127.0.0.1" | ||||
| } | ||||
|  | ||||
| getSANConfig() { | ||||
|   cat /etc/ssl/openssl.cnf | ||||
|   printf '\n[SAN]\n' | ||||
|   getSAN | ||||
| } | ||||
|  | ||||
| main() { | ||||
|   local caCertPath="./ca.pem" | ||||
|   local caKeyPath="./ca.key" | ||||
|   local serverCsrPath="./server.csr" | ||||
|   local serverCertPath="./server.pem" | ||||
|   local serverKeyPath="./server.key" | ||||
|  | ||||
|   createKey "$caKeyPath" | ||||
|   createCaCert "$caCertPath" "$caKeyPath" | ||||
|   createKey "$serverKeyPath" | ||||
|   createCSR "$serverCsrPath" "$serverKeyPath" | ||||
|   signCSR "$serverCertPath" "$serverCsrPath" "$caCertPath" "$caKeyPath" | ||||
| } | ||||
|  | ||||
| main "$@" | ||||
| @@ -1 +0,0 @@ | ||||
| this is some invalid content | ||||
| @@ -1,28 +0,0 @@ | ||||
| -----BEGIN CERTIFICATE REQUEST----- | ||||
| MIIEtTCCAp0CAQAwQzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRMwEQYDVQQK | ||||
| DApBY21lLCBJbmMuMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEB | ||||
| AQUAA4ICDwAwggIKAoICAQCVkk5HMKNvMXVJoJcUfKK252UT6rdnlsaFLZOlcbp3 | ||||
| otqiq3A2jhQLeL5Ocyd22s/ak2RX9liK+ynV8fP3YWoUBP5elhwbykubiIvSTRS5 | ||||
| 85Z0s9NfzscImMpnivt+bOy3KOoriy/0jfJ7WMqLRUTUEusXUpW8QT/U9cK6DrwQ | ||||
| E/9oXTr669yvqjyFsxjOB0pLOFFib0LeQZxrA2h+oAP8qT/Of6kyTgGWjLhSC1cV | ||||
| eCPZsSeZUT61FbIu/b5M42WYuddoFbf8y9m0oLeYizYob7poE25jw91bNa8y2nfS | ||||
| v+JuCcfO4wq29cnldGFNpJPhBhc1sbBvVshXXKWdfzN1c8RCS5hNANy1phAJ7RFe | ||||
| 3Uj0WneBVBHHJMz7Qh61uxTST1W8HBDTuaBTxGKTcPFWd9u4lj/BEScRFOSC/qiO | ||||
| 1HCKzOsYhjnHfql5GzfQKpEy/e4m2oL8VTqcJBsfHCyxDIH+6Y3ovttymxAUPJ14 | ||||
| r3mG9FDLq1va/+8xzDswyjmRIVQeOgvllzgM5vCKqz6nsXtLRYgkwHMk5yOaAIzO | ||||
| BnsmZztsyaubjcYvM5pUsiO49VWk6ntiAn+WpF/sreFlesx1peQKbTVovwvn137d | ||||
| V92Oncce+ZikKHxtz4qOz+dH1Fz7Ykor8fXcsfdbkKvwWdz8U/pOBu+83CxBXTWA | ||||
| bwIDAQABoC0wKwYJKoZIhvcNAQkOMR4wHDAaBgNVHREEEzARgglsb2NhbGhvc3SH | ||||
| BH8AAAEwDQYJKoZIhvcNAQELBQADggIBADgJfI3xRKlOInZQjg+afz+L477IiFmP | ||||
| Pf0qwO/EqBkCmbDbmvXpXi/y9Ffh6bMx2naN873nW3k1uVG2W0O4Bl7di9PkmRxY | ||||
| ktcWY+CaxDT5+Y3LmrqICgrZmELTuV5G8xX2/7bpdEtY4sWpoOeOun+CeGTCeUGx | ||||
| sGxOWrhydYwrkowupPthYreIIBBPHWl2gEw/m+Y7aJZGtKnDD9eCbF6RxmXRWHDu | ||||
| 0Ly+F3veXbht9LjKPFsgfsogo33Nl8+W1LCActKNY7NMDdGkc+RqaTyxhYEwomui | ||||
| N1NDOW1qHqSyp2RC13cXokfLL58WGXS6PpNhSln9u4ZG9a+TY+vw1qC//1CyTicY | ||||
| ylyEn2qfqTSG3W7T/u6ZTL0MpMjFv8VigpffJcFDjq6lVH8LyTniSXdCREy78jAo | ||||
| 8O/2tzJtWrar8bbeN7KCwVcJVaK15a1GWZmo5Ei33U/2Tm+UyRbWL8eISO2Hs3WM | ||||
| 90aFPaHfqKpiPsJrnnOm270lZclgqEtpsyuLsAClqxytCYPw4zTa6WOfDJtmVUrT | ||||
| 1fvMjqwzvs7jbNrgfkwSxXiABwTMQQWeAtuSO+zZH4Ms10qyANoh4FFi/oS3dRKQ | ||||
| 0kdu7AsJqnou9q9HWq1WCTqMcyNE0KPHuo4xhtOlWoGbsugTs7XBml30D7bKJVfG | ||||
| PazsY1b0/cx7 | ||||
| -----END CERTIFICATE REQUEST----- | ||||
| @@ -1,51 +0,0 @@ | ||||
| -----BEGIN RSA PRIVATE KEY----- | ||||
| MIIJKAIBAAKCAgEAlZJORzCjbzF1SaCXFHyitudlE+q3Z5bGhS2TpXG6d6Laoqtw | ||||
| No4UC3i+TnMndtrP2pNkV/ZYivsp1fHz92FqFAT+XpYcG8pLm4iL0k0UufOWdLPT | ||||
| X87HCJjKZ4r7fmzstyjqK4sv9I3ye1jKi0VE1BLrF1KVvEE/1PXCug68EBP/aF06 | ||||
| +uvcr6o8hbMYzgdKSzhRYm9C3kGcawNofqAD/Kk/zn+pMk4Bloy4UgtXFXgj2bEn | ||||
| mVE+tRWyLv2+TONlmLnXaBW3/MvZtKC3mIs2KG+6aBNuY8PdWzWvMtp30r/ibgnH | ||||
| zuMKtvXJ5XRhTaST4QYXNbGwb1bIV1ylnX8zdXPEQkuYTQDctaYQCe0RXt1I9Fp3 | ||||
| gVQRxyTM+0IetbsU0k9VvBwQ07mgU8Rik3DxVnfbuJY/wREnERTkgv6ojtRwiszr | ||||
| GIY5x36peRs30CqRMv3uJtqC/FU6nCQbHxwssQyB/umN6L7bcpsQFDydeK95hvRQ | ||||
| y6tb2v/vMcw7MMo5kSFUHjoL5Zc4DObwiqs+p7F7S0WIJMBzJOcjmgCMzgZ7Jmc7 | ||||
| bMmrm43GLzOaVLIjuPVVpOp7YgJ/lqRf7K3hZXrMdaXkCm01aL8L59d+3Vfdjp3H | ||||
| HvmYpCh8bc+Kjs/nR9Rc+2JKK/H13LH3W5Cr8Fnc/FP6TgbvvNwsQV01gG8CAwEA | ||||
| AQKCAgBLBQn8DPo8YDsqxcBhRy45vQ/mkHiTHX3O+JAwkD1tmiI9Ku3qfxKwukwB | ||||
| fyKRK6jLQdg3gljgxJ80Ltol/xc8mVCYUoQgsDOB/FfdEEpQBkw1lqhzSnxr5G7I | ||||
| xl3kCHAmYgAp/PL9n2C620sj1YdzM1X06bgupy+D+gxEU/WhvtYBG5nklv6moSUg | ||||
| DjdnxyJNXh7710Bbx97Tke8Ma+f0B1P4l/FeSN/lCgm9JPD11L9uhbuN28EvBIXN | ||||
| qfmUCQ5BLx1KmHIi+n/kaCQN/+0XFQsS/oQEyA2znNaWFBu7egDxHji4nQoXwGoW | ||||
| i2vujJibafmkNc5/2bA8mTx8JXvCLhU2L9j2ZumpKOda0g+pfMauesL+9rvZdqwW | ||||
| gjdjndOHZlg3qm40hGCDBVmmV3mdnvXrk1BbuB4Y0N7qGo3PyYtJHGwJILaNQVGR | ||||
| Sj75uTatxJwFXsqSaJaErV3Q90IiyXX4AOFGnWHOs29GEwtnDbCvT/rzqutTYSXD | ||||
| Yv0XFDznzJelhZTH7FbaW3FW3YGEG1ER/0MtKpsAH4i7H9q3KKK8yrzUsgUkGwXt | ||||
| xtoLckh91xilPIGbzARdELTEdHrjlFL+qaz3PIqEQScWz3WBu2JcIzGbp6PQfMZ+ | ||||
| FZXarEb/ADZuX0+WoKFYR5jzwMoQfF/fxe2Ib/37ETNw4BgfSQKCAQEAxOw64XgO | ||||
| nUVJslzGK/H5fqTVpD1rfRmvVAiSDLAuWpClbpDZXqEPuoPPYsiccuUWu9VkJE1F | ||||
| 6MZEexGx1jFkN08QUHD1Bobzu6ThaBc2PrWHRjFGKM60d0AkhOiL4N04FGwVeCN6 | ||||
| xzIJFk1E4VOOo1+lzeAWRvi1lwuWTgQi+m25nwBJtmYdBLGeS+DXy80Fi6deECei | ||||
| ipDzJ4rxJsZ61uqBeYC4CfuHW9m5rCzJWPMMMFrPdl3OxEyZzKng4Co5EYc5i/QH | ||||
| piXD6IJayKcTPRK3tBJZp2YCIIdtQLcjAwmDEDowQtelHkbTihXMGRarf3VcOEoN | ||||
| ozMRgcLEEynuKwKCAQEAwnF5ZkkJEL/1MCOZ6PZfSKl35ZMIz/4Umk8hOMAQGhCT | ||||
| cnxlDUfGSBu4OihdBbIuBSBsYDjgcev8uyiIPDVy0FIkBKRGfgrNCLDh19aHljvE | ||||
| bUc3akvbft0mro86AvSd/Rpc7sj841bru37RDUm6AJOtIvb6DWUpMOZgMm0WMmSI | ||||
| kNs/UT+7rqg+AZPP8lumnJIFnRK38xOehQAaS1FHWGP//38py8yo8eXpMsoCWMch | ||||
| c+kZD2jsAYV+SWjjkZjcrv/52+asd4AotRXIShV8E8xItQeq6vLHKOaIe0tC2Y44 | ||||
| ONAKiu4dgABt1voy8I5J63MwgeNmgAUS+KsgUclYzQKCAQEAlt/3bPAzIkQH5uQ1 | ||||
| 4U2PvnxEQ4XbaQnYzyWR4K7LlQ/l8ASCxoHYLyr2JdVWKKFk/ZzNERMzUNk3dqNk | ||||
| AZvuEII/GaKx2MJk04vMN5gxM3KZpinyeymEEynN0RbqtOpJITx+ZoGofB3V4IRr | ||||
| FciTLJEH0+iwqMe9OXDjQ/rfYcfXw/7QezNZYFNF2RT3wWnfqdQduXrkig3sfotx | ||||
| oCfJzgf2E0WPu/Y/CxyRqVzXF5N/7zxkX2gYF0YpQCmX5afz+X4FlTju81lT9DyL | ||||
| mdiIYO6KWSkGD7+UOaAJEOA/rwAGrtQmTdAy7jONt+pjaYV4+DrO4UG7mSJzc1vq | ||||
| JlSl6QKCAQARqwPv8mT7e6XI2QNMMs7XqGZ3mtOrKpguqVAIexM7exQazAjWmxX+ | ||||
| SV6FElPZh6Y82wRd/e0PDPVrADTY27ZyDXSuY0rwewTEbGYpGZo6YXXoxBbZ9sic | ||||
| D3ZLWEJaMGYGsJWPMP4hni1PXSebwH5BPSn3Sl/QRcfnZJeLHXRt4cqy9uka9eKU | ||||
| 7T6tIAQ+LmvGQFJ4QlIqqTa3ORoqi9kiw/tn+OMQXKlhSZXWApsR/A4jHSQkzVDc | ||||
| loeyHfDHsw8ia6oFfEFhnmiUg8UuTiN3HRHiOS8jqCnGoqP2KBGL+StMpkK++wH9 | ||||
| NozEgvmL+DHpTg8zTjlrGortw4btR5FlAoIBABVni+EsGA5K/PM1gIct2pDm+6Kq | ||||
| UCYScTwIjftuwKLk/KqermG9QJLiJouKO3ZSz7iCelu87Dx1cKeXrc2LQ1pnQzCB | ||||
| JnI6BCT+zRnQFXjLokJXD2hIS2hXhqV6/9FRXLKKMYePcDxWt/etLNGmpLnhDfb3 | ||||
| sMOH/9pnaGmtk36Ce03Hh7E1C6io/MKfTq+KKUV1UGwO1BdNQCiclkYzAUqn1O+Y | ||||
| c8BaeGKc2c6as8DKrPTGGQGmzo/ZUxQVfVFl2g7+HXISWBBcui/G5gtnU1afZqbW | ||||
| mTmDoqs4510vhlkhN9XZ0DyhewDIqNNGEY2vS1x2fJz1XC2Eve4KpSyUsiE= | ||||
| -----END RSA PRIVATE KEY----- | ||||
| @@ -1,30 +0,0 @@ | ||||
| -----BEGIN CERTIFICATE----- | ||||
| MIIFJjCCAw6gAwIBAgIJAOcEAbv8NslfMA0GCSqGSIb3DQEBCwUAMEAxCzAJBgNV | ||||
| BAYTAlVTMQswCQYDVQQIDAJDQTETMBEGA1UECgwKQWNtZSwgSW5jLjEPMA0GA1UE | ||||
| AwwGc29tZUNBMCAXDTE4MDYwODEzMzkyNFoYDzIyMTgwNDIxMTMzOTI0WjBDMQsw | ||||
| CQYDVQQGEwJVUzELMAkGA1UECAwCQ0ExEzARBgNVBAoMCkFjbWUsIEluYy4xEjAQ | ||||
| BgNVBAMMCWxvY2FsaG9zdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB | ||||
| AJWSTkcwo28xdUmglxR8orbnZRPqt2eWxoUtk6Vxunei2qKrcDaOFAt4vk5zJ3ba | ||||
| z9qTZFf2WIr7KdXx8/dhahQE/l6WHBvKS5uIi9JNFLnzlnSz01/OxwiYymeK+35s | ||||
| 7Lco6iuLL/SN8ntYyotFRNQS6xdSlbxBP9T1wroOvBAT/2hdOvrr3K+qPIWzGM4H | ||||
| Sks4UWJvQt5BnGsDaH6gA/ypP85/qTJOAZaMuFILVxV4I9mxJ5lRPrUVsi79vkzj | ||||
| ZZi512gVt/zL2bSgt5iLNihvumgTbmPD3Vs1rzLad9K/4m4Jx87jCrb1yeV0YU2k | ||||
| k+EGFzWxsG9WyFdcpZ1/M3VzxEJLmE0A3LWmEAntEV7dSPRad4FUEcckzPtCHrW7 | ||||
| FNJPVbwcENO5oFPEYpNw8VZ327iWP8ERJxEU5IL+qI7UcIrM6xiGOcd+qXkbN9Aq | ||||
| kTL97ibagvxVOpwkGx8cLLEMgf7pjei+23KbEBQ8nXiveYb0UMurW9r/7zHMOzDK | ||||
| OZEhVB46C+WXOAzm8IqrPqexe0tFiCTAcyTnI5oAjM4GeyZnO2zJq5uNxi8zmlSy | ||||
| I7j1VaTqe2ICf5akX+yt4WV6zHWl5AptNWi/C+fXft1X3Y6dxx75mKQofG3Pio7P | ||||
| 50fUXPtiSivx9dyx91uQq/BZ3PxT+k4G77zcLEFdNYBvAgMBAAGjHjAcMBoGA1Ud | ||||
| EQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAgEABL8kffi7 | ||||
| 48qSD+/l/UwCYdmqta1vAbOkvLnPtfXe1XlDpJipNuPxUBc8nNTemtrbg0erNJnC | ||||
| jQHodqmdKBJJOdaEKTwAGp5pYvvjlU3WasmhfJy+QwOWgeqjJcTUo3+DEaHRls16 | ||||
| AZXlsp3hB6z0gzR/qzUuZwpMbL477JpuZtAcwLYeVvLG8bQRyWyEy8JgGDoYSn8s | ||||
| Z16s+r6AX+cnL/2GHkZ+oc3iuXJbnac4xfWTKDiYnyzK6RWRnoyro7X0jiPz6XX3 | ||||
| wyoWzB1uMSCXscrW6ZcKyKqz75lySLuwGxOMhX4nGOoYHY0ZtrYn5WK2ZAJxsQnn | ||||
| 8QcjPB0nq37U7ifk1uebmuXe99iqyKnWaLvlcpe+HnO5pVxFkSQEf7Zh+hEnRDkN | ||||
| IBzLFnqwDS1ug/oQ1aSvc8oBh2ylKDJuGtPNqGKibNJyb2diXO/aEUOKRUKPAxKa | ||||
| dbKsc4Y1bhZNN3/MICMoyghwAOiuwUQMR5uhxTkQmZUwNrPFa+eW6GvyoYLFUsZs | ||||
| hZfWLNGD5mLADElxs0HF7F9Zk6pSocTDXba4d4lfxsq88SyZZ7PbjJYFRfLQPzd1 | ||||
| CfvpRPqolEmZo1Y5Q644PELYiJRKpBxmX5GtC5j5eaUD9XdGKvXsGhb0m0gW75rq | ||||
| iUnnLkZt2ya1cDJDiCnJjo7r5KxMo0XXFDc= | ||||
| -----END CERTIFICATE----- | ||||
| @@ -1,284 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| 	"regexp" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/find" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/soap" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // IsNotFound returns true if err is a NotFoundError or a DefaultNotFoundError | ||||
| func IsNotFound(err error) bool { | ||||
| 	_, ok := err.(*find.NotFoundError) | ||||
| 	if ok { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	_, ok = err.(*find.DefaultNotFoundError) | ||||
| 	if ok { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func getFinder(dc *Datacenter) *find.Finder { | ||||
| 	finder := find.NewFinder(dc.Client(), false) | ||||
| 	finder.SetDatacenter(dc.Datacenter) | ||||
| 	return finder | ||||
| } | ||||
|  | ||||
| // formatVirtualDiskUUID removes any spaces and hyphens in UUID | ||||
| // Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa | ||||
| func formatVirtualDiskUUID(uuid string) string { | ||||
| 	uuidWithNoSpace := strings.Replace(uuid, " ", "", -1) | ||||
| 	uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1) | ||||
| 	return strings.ToLower(uuidWithNoHyphens) | ||||
| } | ||||
|  | ||||
| // getSCSIControllersOfType filters specific type of Controller device from given list of Virtual Machine Devices | ||||
| func getSCSIControllersOfType(vmDevices object.VirtualDeviceList, scsiType string) []*types.VirtualController { | ||||
| 	// get virtual scsi controllers of passed argument type | ||||
| 	var scsiControllers []*types.VirtualController | ||||
| 	for _, device := range vmDevices { | ||||
| 		devType := vmDevices.Type(device) | ||||
| 		if devType == scsiType { | ||||
| 			if c, ok := device.(types.BaseVirtualController); ok { | ||||
| 				scsiControllers = append(scsiControllers, c.GetVirtualController()) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return scsiControllers | ||||
| } | ||||
|  | ||||
| // getAvailableSCSIController returns an available SCSI controller (one with fewer than 15 attached disk devices) from the given list of controllers. | ||||
| func getAvailableSCSIController(scsiControllers []*types.VirtualController) *types.VirtualController { | ||||
| 	// get SCSI controller which has space for adding more devices | ||||
| 	for _, controller := range scsiControllers { | ||||
| 		if len(controller.Device) < SCSIControllerDeviceLimit { | ||||
| 			return controller | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // getNextUnitNumber gets the next available unit number on the given SCSI controller from the list of virtual devices | ||||
| func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { | ||||
| 	var takenUnitNumbers [SCSIDeviceSlots]bool | ||||
| 	takenUnitNumbers[SCSIReservedSlot] = true | ||||
| 	key := c.GetVirtualController().Key | ||||
|  | ||||
| 	for _, device := range devices { | ||||
| 		d := device.GetVirtualDevice() | ||||
| 		if d.ControllerKey == key { | ||||
| 			if d.UnitNumber != nil && *d.UnitNumber < SCSIDeviceSlots { | ||||
| 				takenUnitNumbers[*d.UnitNumber] = true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	for unitNumber, takenUnitNumber := range takenUnitNumbers { | ||||
| 		if !takenUnitNumber { | ||||
| 			return int32(unitNumber), nil | ||||
| 		} | ||||
| 	} | ||||
| 	return -1, fmt.Errorf("SCSI Controller with key=%d does not have any available slots", key) | ||||
| } | ||||
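|  | ||||
| // Illustrative note (not part of the original file): a SCSI controller exposes | ||||
| // SCSIDeviceSlots unit numbers, with SCSIReservedSlot reserved for the controller | ||||
| // itself, so getNextUnitNumber returns the lowest free, non-reserved slot. For | ||||
| // example, with units 0 and 1 already taken on the controller: | ||||
| // | ||||
| //	unit, err := getNextUnitNumber(vmDevices, scsiController) // unit == 2, err == nil | ||||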
|  | ||||
| // getSCSIControllers filters and returns the list of SCSI controller devices from the given list of virtual machine devices. | ||||
| func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { | ||||
| 	// get all virtual scsi controllers | ||||
| 	var scsiControllers []*types.VirtualController | ||||
| 	for _, device := range vmDevices { | ||||
| 		devType := vmDevices.Type(device) | ||||
| 		switch devType { | ||||
| 		case SCSIControllerType, strings.ToLower(LSILogicControllerType), strings.ToLower(BusLogicControllerType), PVSCSIControllerType, strings.ToLower(LSILogicSASControllerType): | ||||
| 			if c, ok := device.(types.BaseVirtualController); ok { | ||||
| 				scsiControllers = append(scsiControllers, c.GetVirtualController()) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return scsiControllers | ||||
| } | ||||
|  | ||||
| // RemoveStorageClusterORFolderNameFromVDiskPath removes the datastore cluster or folder path from the vDiskPath. | ||||
| // For vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk. | ||||
| // For vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value remains the same: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk. | ||||
| func RemoveStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string { | ||||
| 	datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1] | ||||
| 	if filepath.Base(datastore) != datastore { | ||||
| 		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1) | ||||
| 	} | ||||
| 	return vDiskPath | ||||
| } | ||||
|  | ||||
| // GetPathFromVMDiskPath retrieves the path from VM Disk Path. | ||||
| // Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk | ||||
| func GetPathFromVMDiskPath(vmDiskPath string) string { | ||||
| 	datastorePathObj := new(object.DatastorePath) | ||||
| 	isSuccess := datastorePathObj.FromString(vmDiskPath) | ||||
| 	if !isSuccess { | ||||
| 		klog.Errorf("Failed to parse vmDiskPath: %s", vmDiskPath) | ||||
| 		return "" | ||||
| 	} | ||||
| 	return datastorePathObj.Path | ||||
| } | ||||
|  | ||||
| // GetDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. | ||||
| func GetDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) { | ||||
| 	datastorePathObj := new(object.DatastorePath) | ||||
| 	isSuccess := datastorePathObj.FromString(vmDiskPath) | ||||
| 	if !isSuccess { | ||||
| 		klog.Errorf("Failed to parse volPath: %s", vmDiskPath) | ||||
| 		return nil, fmt.Errorf("failed to parse volPath: %s", vmDiskPath) | ||||
| 	} | ||||
| 	return datastorePathObj, nil | ||||
| } | ||||
|  | ||||
| // IsValidUUID checks if the string is a valid UUID. | ||||
| func IsValidUUID(uuid string) bool { | ||||
| 	r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") | ||||
| 	return r.MatchString(uuid) | ||||
| } | ||||
|  | ||||
| // IsManagedObjectNotFoundError returns true if error is of type ManagedObjectNotFound | ||||
| func IsManagedObjectNotFoundError(err error) bool { | ||||
| 	isManagedObjectNotFoundError := false | ||||
| 	if soap.IsSoapFault(err) { | ||||
| 		_, isManagedObjectNotFoundError = soap.ToSoapFault(err).VimFault().(types.ManagedObjectNotFound) | ||||
| 	} | ||||
| 	return isManagedObjectNotFoundError | ||||
| } | ||||
|  | ||||
| // IsInvalidCredentialsError returns true if error is of type InvalidLogin | ||||
| func IsInvalidCredentialsError(err error) bool { | ||||
| 	isInvalidCredentialsError := false | ||||
| 	if soap.IsSoapFault(err) { | ||||
| 		_, isInvalidCredentialsError = soap.ToSoapFault(err).VimFault().(types.InvalidLogin) | ||||
| 	} | ||||
| 	return isInvalidCredentialsError | ||||
| } | ||||
|  | ||||
| // VerifyVolumePathsForVMDevices verifies if the volume paths (volPaths) are attached to VM. | ||||
| func VerifyVolumePathsForVMDevices(vmDevices object.VirtualDeviceList, volPaths []string, nodeName string, nodeVolumeMap map[string]map[string]bool) { | ||||
| 	volPathsMap := make(map[string]bool) | ||||
| 	for _, volPath := range volPaths { | ||||
| 		volPathsMap[volPath] = true | ||||
| 	} | ||||
| 	// Verify if the volume paths are present on the VM backing virtual disk devices | ||||
| 	for _, device := range vmDevices { | ||||
| 		if vmDevices.TypeName(device) == "VirtualDisk" { | ||||
| 			virtualDevice := device.GetVirtualDevice() | ||||
| 			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { | ||||
| 				if volPathsMap[backing.FileName] { | ||||
| 					setNodeVolumeMap(nodeVolumeMap, backing.FileName, nodeName, true) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // isvCenterNotSupported takes the vCenter version and vCenter API version as input and returns true if the vCenter | ||||
| // is no longer supported by VMware for the in-tree vSphere volume plugin | ||||
| func isvCenterNotSupported(vCenterVersion string, vCenterAPIVersion string) (bool, error) { | ||||
| 	var vcversion, vcapiversion, minvcversion vcVersion | ||||
| 	var err error | ||||
| 	err = vcversion.parse(vCenterVersion) | ||||
| 	if err != nil { | ||||
| 		return false, fmt.Errorf("failed to parse vCenter version: %s. err: %+v", vCenterVersion, err) | ||||
| 	} | ||||
| 	err = vcapiversion.parse(vCenterAPIVersion) | ||||
| 	if err != nil { | ||||
| 		return false, fmt.Errorf("failed to parse vCenter API version: %s. err: %+v", vCenterAPIVersion, err) | ||||
| 	} | ||||
| 	err = minvcversion.parse(MinvCenterVersion) | ||||
| 	if err != nil { | ||||
| 		return false, fmt.Errorf("failed to parse minimum vCenter version: %s. err: %+v", MinvCenterVersion, err) | ||||
| 	} | ||||
| 	if vcversion.isLessThan(minvcversion) && vcapiversion.isLessThan(minvcversion) { | ||||
| 		return true, nil | ||||
| 	} | ||||
| 	return false, nil | ||||
| } | ||||
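|  | ||||
| // Worked example (not part of the original file): with MinvCenterVersion at 7.0.2, | ||||
| // as the test table in this change suggests, a vCenter reporting version "6.7.0" | ||||
| // and API version "6.7.3" parses below the minimum on both counts, so the function | ||||
| // returns true (no longer supported), while "7.0.3"/"7.0.3.0" returns false. | ||||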
|  | ||||
| // vcVersion represents a VC version | ||||
| type vcVersion struct { | ||||
| 	Major    int64 | ||||
| 	Minor    int64 | ||||
| 	Revision int64 | ||||
| 	Build    int64 | ||||
| } | ||||
|  | ||||
| // parse parses a version string into a vcVersion. | ||||
| // It returns an error when parsing fails. | ||||
| func (v *vcVersion) parse(version string) error { | ||||
| 	for index, value := range strings.Split(version, ".") { | ||||
| 		var err error | ||||
| 		if index == 0 { | ||||
| 			v.Major, err = strconv.ParseInt(value, 10, 64) | ||||
| 		} else if index == 1 { | ||||
| 			v.Minor, err = strconv.ParseInt(value, 10, 64) | ||||
| 		} else if index == 2 { | ||||
| 			v.Revision, err = strconv.ParseInt(value, 10, 64) | ||||
| 		} else if index == 3 { | ||||
| 			v.Build, err = strconv.ParseInt(value, 10, 64) | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return fmt.Errorf("failed to parse version: %q, err: %v", version, err) | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // isLessThan compares vcVersion v to o and returns | ||||
| // true if v is less than o | ||||
| func (v *vcVersion) isLessThan(o vcVersion) bool { | ||||
| 	if v.Major != o.Major { | ||||
| 		return v.Major < o.Major | ||||
| 	} | ||||
| 	if v.Minor != o.Minor { | ||||
| 		return v.Minor < o.Minor | ||||
| 	} | ||||
| 	if v.Revision != o.Revision { | ||||
| 		return v.Revision < o.Revision | ||||
| 	} | ||||
| 	if v.Build != o.Build { | ||||
| 		return v.Build < o.Build | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
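|  | ||||
| // Illustrative sketch (not part of the original file): comparing two vCenter | ||||
| // version strings with the helpers above. | ||||
| // | ||||
| //	var current, minimum vcVersion | ||||
| //	_ = current.parse("6.7.0") | ||||
| //	_ = minimum.parse("7.0.2") | ||||
| //	older := current.isLessThan(minimum) // true | ||||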
| @@ -1,163 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/simulator" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| ) | ||||
|  | ||||
| func TestUtils(t *testing.T) { | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	model := simulator.VPX() | ||||
| 	// Child folder "F0" will be created under the root folder and datacenter folders, | ||||
| 	// and all resources are created within the "F0" child folders. | ||||
| 	model.Folder = 1 | ||||
|  | ||||
| 	defer model.Remove() | ||||
| 	err := model.Create() | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	s := model.Service.NewServer() | ||||
| 	defer s.Close() | ||||
|  | ||||
| 	c, err := govmomi.NewClient(ctx, s.URL, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vc := &VSphereConnection{Client: c.Client} | ||||
|  | ||||
| 	dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	finder := getFinder(dc) | ||||
| 	datastores, err := finder.DatastoreList(ctx, "*") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	count := model.Count() | ||||
| 	if count.Datastore != len(datastores) { | ||||
| 		t.Errorf("got %d Datastores, expected: %d", len(datastores), count.Datastore) | ||||
| 	} | ||||
|  | ||||
| 	_, err = finder.Datastore(ctx, testNameNotFound) | ||||
| 	if !IsNotFound(err) { | ||||
| 		t.Errorf("unexpected error: %s", err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestIsvCenterNotSupported(t *testing.T) { | ||||
| 	type testsData struct { | ||||
| 		vcVersion      string | ||||
| 		vcAPIVersion   string | ||||
| 		isNotSupported bool | ||||
| 	} | ||||
| 	testdataArray := []testsData{ | ||||
| 		{"8.0.0", "8.0.0.0", false}, | ||||
| 		{"7.0.3", "7.0.3.0", false}, | ||||
| 		{"7.0.2", "7.0.2.0", false}, | ||||
| 		{"7.0.1", "7.0.1.1", true}, | ||||
| 		{"7.0.0", "7.0.0.0", true}, | ||||
| 		{"6.7.0", "6.7.3", true}, | ||||
| 		{"6.7.0", "6.7", true}, | ||||
| 		{"6.7.0", "6.7.2", true}, | ||||
| 		{"6.7.0", "6.7.1", true}, | ||||
| 		{"6.5.0", "6.5", true}, | ||||
| 	} | ||||
|  | ||||
| 	for _, test := range testdataArray { | ||||
| 		notsupported, err := isvCenterNotSupported(test.vcVersion, test.vcAPIVersion) | ||||
| 		if err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| 		if notsupported != test.isNotSupported { | ||||
| 			t.Fatalf("test failed for vc version: %q and vc API version: %q", | ||||
| 				test.vcVersion, test.vcAPIVersion) | ||||
| 		} else { | ||||
| 			t.Logf("test for vc version: %q and vc API version: %q passed. Is Not Supported : %v", | ||||
| 				test.vcVersion, test.vcAPIVersion, notsupported) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestGetNextUnitNumber(t *testing.T) { | ||||
| 	type testData struct { | ||||
| 		name        string | ||||
| 		deviceList  object.VirtualDeviceList | ||||
| 		expectValue int32 | ||||
| 		expectError bool | ||||
| 	} | ||||
| 	tests := []testData{ | ||||
| 		{ | ||||
| 			name:        "should return 3 when devices 0-2 taken", | ||||
| 			deviceList:  generateVirtualDeviceList([]int32{0, 1, 2}), | ||||
| 			expectValue: 3, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "should return 0 when devices 1-3 taken", | ||||
| 			deviceList:  generateVirtualDeviceList([]int32{1, 2, 3}), | ||||
| 			expectValue: 0, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "should return error when no slots available", | ||||
| 			deviceList:  generateVirtualDeviceList([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}), | ||||
| 			expectValue: -1, | ||||
| 			expectError: true, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "should ignore invalid UnitNumber in device list", | ||||
| 			deviceList:  generateVirtualDeviceList([]int32{0, 1, 16}), | ||||
| 			expectValue: 2, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	controller := &types.VirtualController{} | ||||
| 	for _, test := range tests { | ||||
| 		val, err := getNextUnitNumber(test.deviceList, controller) | ||||
| 		if err != nil && !test.expectError { | ||||
| 			t.Fatalf("%s: unexpected error: %v", test.name, err) | ||||
| 		} | ||||
| 		if val != test.expectValue { | ||||
| 			t.Fatalf("%s: expected value %v but got %v", test.name, test.expectValue, val) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func generateVirtualDeviceList(unitNumbers []int32) object.VirtualDeviceList { | ||||
| 	deviceList := object.VirtualDeviceList{} | ||||
| 	for _, val := range unitNumbers { | ||||
| 		unitNum := val | ||||
| 		dev := &types.VirtualDevice{ | ||||
| 			Key:        unitNum, | ||||
| 			UnitNumber: &unitNum, | ||||
| 		} | ||||
| 		deviceList = append(deviceList, dev) | ||||
| 	} | ||||
| 	return deviceList | ||||
| } | ||||
| @@ -1,472 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/property" | ||||
| 	"github.com/vmware/govmomi/vim25" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // VirtualMachine extends the govmomi VirtualMachine object | ||||
| type VirtualMachine struct { | ||||
| 	*object.VirtualMachine | ||||
| 	Datacenter *Datacenter | ||||
| } | ||||
|  | ||||
| // IsDiskAttached checks if disk is attached to the VM. | ||||
| func (vm *VirtualMachine) IsDiskAttached(ctx context.Context, diskPath string) (bool, error) { | ||||
| 	device, err := vm.getVirtualDeviceByPath(ctx, diskPath) | ||||
| 	if err != nil { | ||||
| 		return false, err | ||||
| 	} | ||||
| 	return device != nil, nil | ||||
| } | ||||
|  | ||||
| // DeleteVM deletes the VM. | ||||
| func (vm *VirtualMachine) DeleteVM(ctx context.Context) error { | ||||
| 	destroyTask, err := vm.Destroy(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to delete the VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return destroyTask.Wait(ctx) | ||||
| } | ||||
|  | ||||
| // AttachDisk attaches the disk at the given vmDiskPath to the virtual machine. | ||||
| // Additionally, the disk is configured with an SPBM policy when volumeOptions.StoragePolicyName or volumeOptions.StoragePolicyID is non-empty. | ||||
| func (vm *VirtualMachine) AttachDisk(ctx context.Context, vmDiskPath string, volumeOptions *VolumeOptions) (string, error) { | ||||
| 	// Check if the diskControllerType is valid | ||||
| 	if !CheckControllerSupported(volumeOptions.SCSIControllerType) { | ||||
| 		return "", fmt.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) | ||||
| 	} | ||||
| 	vmDiskPathCopy := vmDiskPath | ||||
| 	vmDiskPath = RemoveStorageClusterORFolderNameFromVDiskPath(vmDiskPath) | ||||
| 	attached, err := vm.IsDiskAttached(ctx, vmDiskPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while checking if disk is attached on VM: %q. vmDiskPath: %q, err: %+v", vm.InventoryPath, vmDiskPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	// If disk is already attached, return the disk UUID | ||||
| 	if attached { | ||||
| 		diskUUID, _ := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath) | ||||
| 		return diskUUID, nil | ||||
| 	} | ||||
|  | ||||
| 	if volumeOptions.StoragePolicyName != "" { | ||||
| 		pbmClient, err := NewPbmClient(ctx, vm.Client()) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Error occurred while creating new pbmClient. err: %+v", err) | ||||
| 			return "", err | ||||
| 		} | ||||
|  | ||||
| 		volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", volumeOptions.StoragePolicyName, err) | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	dsObj, err := vm.Datacenter.GetDatastoreByPath(ctx, vmDiskPathCopy) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get datastore from vmDiskPath: %q. err: %+v", vmDiskPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	// If disk is not attached, create a disk spec for disk to be attached to the VM. | ||||
| 	disk, newSCSIController, err := vm.CreateDiskSpec(ctx, vmDiskPath, dsObj, volumeOptions) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while creating disk spec. err: %+v", err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} | ||||
| 	deviceConfigSpec := &types.VirtualDeviceConfigSpec{ | ||||
| 		Device:    disk, | ||||
| 		Operation: types.VirtualDeviceConfigSpecOperationAdd, | ||||
| 	} | ||||
| 	// Configure the disk with the SPBM profile only if ProfileID is not empty. | ||||
| 	if volumeOptions.StoragePolicyID != "" { | ||||
| 		profileSpec := &types.VirtualMachineDefinedProfileSpec{ | ||||
| 			ProfileId: volumeOptions.StoragePolicyID, | ||||
| 		} | ||||
| 		deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec) | ||||
| 	} | ||||
| 	virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec) | ||||
| 	requestTime := time.Now() | ||||
| 	task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec) | ||||
| 	if err != nil { | ||||
| 		RecordvSphereMetric(APIAttachVolume, requestTime, err) | ||||
| 		klog.Errorf("Failed to attach the disk with storagePolicy: %q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) | ||||
| 		if newSCSIController != nil { | ||||
| 			nestedErr := vm.deleteController(ctx, newSCSIController, vmDevices) | ||||
| 			if nestedErr != nil { | ||||
| 				return "", fmt.Errorf("failed to delete SCSI Controller after reconfiguration failed with err=%v: %v", err, nestedErr) | ||||
| 			} | ||||
| 		} | ||||
| 		return "", err | ||||
| 	} | ||||
| 	err = task.Wait(ctx) | ||||
| 	RecordvSphereMetric(APIAttachVolume, requestTime, err) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to attach the disk with storagePolicy: %+q on VM: %q. err - %+v", volumeOptions.StoragePolicyID, vm.InventoryPath, err) | ||||
| 		if newSCSIController != nil { | ||||
| 			nestedErr := vm.deleteController(ctx, newSCSIController, vmDevices) | ||||
| 			if nestedErr != nil { | ||||
| 				return "", fmt.Errorf("failed to delete SCSI Controller after waiting for reconfiguration failed with err='%v': %v", err, nestedErr) | ||||
| 			} | ||||
| 		} | ||||
| 		return "", err | ||||
| 	} | ||||
|  | ||||
| 	// Once disk is attached, get the disk UUID. | ||||
| 	diskUUID, err := vm.Datacenter.GetVirtualDiskPage83Data(ctx, vmDiskPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while getting Disk Info from VM: %q. err: %v", vm.InventoryPath, err) | ||||
| 		nestedErr := vm.DetachDisk(ctx, vmDiskPath) | ||||
| 		if nestedErr != nil { | ||||
| 			return "", fmt.Errorf("failed to detach disk after getting VM UUID failed with err='%v': %v", err, nestedErr) | ||||
| 		} | ||||
| 		if newSCSIController != nil { | ||||
| 			nestedErr = vm.deleteController(ctx, newSCSIController, vmDevices) | ||||
| 			if nestedErr != nil { | ||||
| 				return "", fmt.Errorf("failed to delete SCSI Controller after getting VM UUID failed with err='%v': %v", err, nestedErr) | ||||
| 			} | ||||
| 		} | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return diskUUID, nil | ||||
| } | ||||
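|  | ||||
| // Illustrative sketch (not part of the original file): attaching a thin-provisioned | ||||
| // disk governed by a storage policy. The datastore path, policy name and option | ||||
| // values shown here are assumptions for the example. | ||||
| // | ||||
| //	opts := &VolumeOptions{ | ||||
| //		SCSIControllerType: PVSCSIControllerType, | ||||
| //		StoragePolicyName:  "gold", | ||||
| //		DiskFormat:         "thin", | ||||
| //	} | ||||
| //	diskUUID, err := vm.AttachDisk(ctx, "[vsanDatastore] kubevols/volume.vmdk", opts) | ||||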
|  | ||||
| // GetHost returns host of the virtual machine | ||||
| func (vm *VirtualMachine) GetHost(ctx context.Context) (mo.HostSystem, error) { | ||||
| 	host, err := vm.HostSystem(ctx) | ||||
| 	var hostSystemMo mo.HostSystem | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return hostSystemMo, err | ||||
| 	} | ||||
|  | ||||
| 	s := object.NewSearchIndex(vm.Client()) | ||||
| 	err = s.Properties(ctx, host.Reference(), []string{"summary"}, &hostSystemMo) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve summary for host: %+v. err: %+v", host, err) | ||||
| 		return hostSystemMo, err | ||||
| 	} | ||||
| 	return hostSystemMo, nil | ||||
| } | ||||
|  | ||||
| // DetachDisk detaches the disk specified by vmDiskPath | ||||
| func (vm *VirtualMachine) DetachDisk(ctx context.Context, vmDiskPath string) error { | ||||
| 	device, err := vm.getVirtualDeviceByPath(ctx, vmDiskPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Disk ID not found for VM: %q with diskPath: %q", vm.InventoryPath, vmDiskPath) | ||||
| 		return err | ||||
| 	} | ||||
| 	if device == nil { | ||||
| 		klog.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) | ||||
| 		return fmt.Errorf("No virtual device found with diskPath: %q on VM: %q", vmDiskPath, vm.InventoryPath) | ||||
| 	} | ||||
| 	// Detach disk from VM | ||||
| 	requestTime := time.Now() | ||||
| 	err = vm.RemoveDevice(ctx, true, device) | ||||
| 	RecordvSphereMetric(APIDetachVolume, requestTime, err) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while removing disk device for VM: %q. err: %v", vm.InventoryPath, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // GetResourcePool gets the resource pool for VM. | ||||
| func (vm *VirtualMachine) GetResourcePool(ctx context.Context) (*object.ResourcePool, error) { | ||||
| 	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"resourcePool"}) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get resource pool from VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return object.NewResourcePool(vm.Client(), vmMoList[0].ResourcePool.Reference()), nil | ||||
| } | ||||
|  | ||||
| // IsActive checks if the VM is active. | ||||
| // Returns true if VM is in poweredOn state. | ||||
| func (vm *VirtualMachine) IsActive(ctx context.Context) (bool, error) { | ||||
| 	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary"}) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get VM Managed object with property summary. err: %+v", err) | ||||
| 		return false, err | ||||
| 	} | ||||
| 	if vmMoList[0].Summary.Runtime.PowerState == ActivePowerState { | ||||
| 		return true, nil | ||||
| 	} | ||||
|  | ||||
| 	return false, nil | ||||
| } | ||||
|  | ||||
| // Exists checks if VM exists and is not terminated | ||||
| func (vm *VirtualMachine) Exists(ctx context.Context) (bool, error) { | ||||
| 	vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*VirtualMachine{vm}, []string{"summary.runtime.powerState"}) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get VM Managed object with property summary.runtime.powerState. err: %+v", err) | ||||
| 		return false, err | ||||
| 	} | ||||
| 	// We check for VMs that are still present in vCenter and have not been terminated/removed from | ||||
| 	// disk, and hence we consider PoweredOn, PoweredOff, and Suspended as alive states. | ||||
| 	aliveStates := []types.VirtualMachinePowerState{ | ||||
| 		types.VirtualMachinePowerStatePoweredOff, | ||||
| 		types.VirtualMachinePowerStatePoweredOn, | ||||
| 		types.VirtualMachinePowerStateSuspended, | ||||
| 	} | ||||
| 	currentState := vmMoList[0].Summary.Runtime.PowerState | ||||
| 	for _, state := range aliveStates { | ||||
| 		if state == currentState { | ||||
| 			return true, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return false, nil | ||||
| } | ||||
|  | ||||
| // GetAllAccessibleDatastores gets the list of accessible Datastores for the given Virtual Machine | ||||
| func (vm *VirtualMachine) GetAllAccessibleDatastores(ctx context.Context) ([]*DatastoreInfo, error) { | ||||
| 	host, err := vm.HostSystem(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get host system for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var hostSystemMo mo.HostSystem | ||||
| 	s := object.NewSearchIndex(vm.Client()) | ||||
| 	err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve datastores for host: %+v. err: %+v", host, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var dsRefList []types.ManagedObjectReference | ||||
| 	dsRefList = append(dsRefList, hostSystemMo.Datastore...) | ||||
|  | ||||
| 	var dsMoList []mo.Datastore | ||||
| 	pc := property.DefaultCollector(vm.Client()) | ||||
| 	properties := []string{DatastoreInfoProperty, NameProperty} | ||||
| 	err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ | ||||
| 			" dsObjList: %+v, properties: %+v, err: %v", dsRefList, properties, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(9).Infof("Result dsMoList: %+v", dsMoList) | ||||
| 	finder := getFinder(vm.Datacenter) | ||||
| 	var dsObjList []*DatastoreInfo | ||||
| 	for _, dsMo := range dsMoList { | ||||
| 		// use the finder so that InventoryPath is set correctly in ds | ||||
| 		ds, err := finder.Datastore(ctx, dsMo.Name) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed finding datastore: %s. err: %+v", dsMo.Name, err) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		datastore := Datastore{ds, vm.Datacenter} | ||||
| 		dsObjList = append(dsObjList, | ||||
| 			&DatastoreInfo{ | ||||
| 				&datastore, | ||||
| 				dsMo.Info.GetDatastoreInfo()}) | ||||
| 	} | ||||
| 	return dsObjList, nil | ||||
| } | ||||
|  | ||||
| // CreateDiskSpec creates a disk spec for the disk to be attached to the VM | ||||
| func (vm *VirtualMachine) CreateDiskSpec(ctx context.Context, diskPath string, dsObj *Datastore, volumeOptions *VolumeOptions) (*types.VirtualDisk, types.BaseVirtualDevice, error) { | ||||
| 	var newSCSIController types.BaseVirtualDevice | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve VM devices. err: %+v", err) | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 	// find SCSI controller of particular type from VM devices | ||||
| 	scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType) | ||||
| 	scsiController := getAvailableSCSIController(scsiControllersOfRequiredType) | ||||
| 	if scsiController == nil { | ||||
| 		newSCSIController, err = vm.createAndAttachSCSIController(ctx, volumeOptions.SCSIControllerType) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.InventoryPath, err) | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 		// Get VM device list | ||||
| 		vmDevices, err := vm.Device(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed to retrieve VM devices. err: %v", err) | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 		// verify scsi controller in virtual machine | ||||
| 		scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, volumeOptions.SCSIControllerType) | ||||
| 		scsiController = getAvailableSCSIController(scsiControllersOfRequiredType) | ||||
| 		if scsiController == nil { | ||||
| 			klog.Errorf("Cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) | ||||
| 			// attempt clean up of scsi controller | ||||
| 			if err := vm.deleteController(ctx, newSCSIController, vmDevices); err != nil { | ||||
| 				return nil, nil, fmt.Errorf("failed to delete SCSI controller after failing to find it on VM: %v", err) | ||||
| 			} | ||||
| 			return nil, nil, fmt.Errorf("cannot find SCSI controller of type: %q in VM", volumeOptions.SCSIControllerType) | ||||
| 		} | ||||
| 	} | ||||
| 	disk := vmDevices.CreateDisk(scsiController, dsObj.Reference(), diskPath) | ||||
| 	unitNumber, err := getNextUnitNumber(vmDevices, scsiController) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Cannot attach disk to VM, unitNumber limit reached - %+v.", err) | ||||
| 		return nil, nil, err | ||||
| 	} | ||||
| 	*disk.UnitNumber = unitNumber | ||||
| 	backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) | ||||
| 	backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent) | ||||
|  | ||||
| 	if volumeOptions.CapacityKB != 0 { | ||||
| 		disk.CapacityInKB = int64(volumeOptions.CapacityKB) | ||||
| 	} | ||||
| 	if volumeOptions.DiskFormat != "" { | ||||
| 		diskFormat := DiskFormatValidType[volumeOptions.DiskFormat] | ||||
| 		switch diskFormat { | ||||
| 		case ThinDiskType: | ||||
| 			backing.ThinProvisioned = types.NewBool(true) | ||||
| 		case EagerZeroedThickDiskType: | ||||
| 			backing.EagerlyScrub = types.NewBool(true) | ||||
| 		default: | ||||
| 			backing.ThinProvisioned = types.NewBool(false) | ||||
| 		} | ||||
| 	} | ||||
| 	return disk, newSCSIController, nil | ||||
| } | ||||
|  | ||||
| // GetVirtualDiskPath gets the first available virtual disk devicePath from the VM | ||||
| func (vm *VirtualMachine) GetVirtualDiskPath(ctx context.Context) (string, error) { | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	// filter vm devices to retrieve device for the given vmdk file identified by disk path | ||||
| 	for _, device := range vmDevices { | ||||
| 		if vmDevices.TypeName(device) == "VirtualDisk" { | ||||
| 			virtualDevice := device.GetVirtualDevice() | ||||
| 			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { | ||||
| 				return backing.FileName, nil | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return "", nil | ||||
| } | ||||
|  | ||||
| // createAndAttachSCSIController creates and attaches the SCSI controller to the VM. | ||||
| func (vm *VirtualMachine) createAndAttachSCSIController(ctx context.Context, diskControllerType string) (types.BaseVirtualDevice, error) { | ||||
| 	// Get VM device list | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to retrieve VM devices for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	allSCSIControllers := getSCSIControllers(vmDevices) | ||||
| 	if len(allSCSIControllers) >= SCSIControllerLimit { | ||||
| 		// we reached the maximum number of controllers we can attach | ||||
| 		klog.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) | ||||
| 		return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) | ||||
| 	} | ||||
| 	newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create new SCSI controller on VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController() | ||||
| 	hotAddRemove := true | ||||
| 	configNewSCSIController.HotAddRemove = &hotAddRemove | ||||
| 	configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing) | ||||
|  | ||||
| 	// add the scsi controller to virtual machine | ||||
| 	err = vm.AddDevice(ctx, newSCSIController) | ||||
| 	if err != nil { | ||||
| 		klog.V(LogLevel).Infof("Cannot add SCSI controller to VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		// attempt clean up of scsi controller | ||||
| 		nestedErr := vm.deleteController(ctx, newSCSIController, vmDevices) | ||||
| 		if nestedErr != nil { | ||||
| 			return nil, fmt.Errorf("failed to delete SCSI controller after failing to add it to vm with err='%v': %v", err, nestedErr) | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return newSCSIController, nil | ||||
| } | ||||
|  | ||||
| // getVirtualDeviceByPath gets the virtual device by path | ||||
| func (vm *VirtualMachine) getVirtualDeviceByPath(ctx context.Context, diskPath string) (types.BaseVirtualDevice, error) { | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// filter vm devices to retrieve device for the given vmdk file identified by disk path | ||||
| 	for _, device := range vmDevices { | ||||
| 		if vmDevices.TypeName(device) == "VirtualDisk" { | ||||
| 			virtualDevice := device.GetVirtualDevice() | ||||
| 			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { | ||||
| 				if matchVirtualDiskAndVolPath(backing.FileName, diskPath) { | ||||
| 					klog.V(LogLevel).Infof("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) | ||||
| 					return device, nil | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
| func matchVirtualDiskAndVolPath(diskPath, volPath string) bool { | ||||
| 	fileExt := ".vmdk" | ||||
| 	diskPath = strings.TrimSuffix(diskPath, fileExt) | ||||
| 	volPath = strings.TrimSuffix(volPath, fileExt) | ||||
| 	return diskPath == volPath | ||||
| } | ||||
|  | ||||
| // deleteController removes the most recently added SCSI controller from the VM. | ||||
| func (vm *VirtualMachine) deleteController(ctx context.Context, controllerDevice types.BaseVirtualDevice, vmDevices object.VirtualDeviceList) error { | ||||
| 	controllerDeviceList := vmDevices.SelectByType(controllerDevice) | ||||
| 	if len(controllerDeviceList) < 1 { | ||||
| 		return ErrNoDevicesFound | ||||
| 	} | ||||
| 	device := controllerDeviceList[len(controllerDeviceList)-1] | ||||
| 	err := vm.RemoveDevice(ctx, true, device) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error occurred while removing device on VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // RenewVM renews this virtual machine with a new client connection. | ||||
| func (vm *VirtualMachine) RenewVM(client *vim25.Client) VirtualMachine { | ||||
| 	dc := Datacenter{Datacenter: object.NewDatacenter(client, vm.Datacenter.Reference())} | ||||
| 	newVM := object.NewVirtualMachine(client, vm.VirtualMachine.Reference()) | ||||
| 	return VirtualMachine{VirtualMachine: newVM, Datacenter: &dc} | ||||
| } | ||||
| @@ -1,170 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/simulator" | ||||
| ) | ||||
|  | ||||
| func TestVirtualMachine(t *testing.T) { | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	model := simulator.VPX() | ||||
|  | ||||
| 	defer model.Remove() | ||||
| 	err := model.Create() | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	s := model.Service.NewServer() | ||||
| 	defer s.Close() | ||||
|  | ||||
| 	c, err := govmomi.NewClient(ctx, s.URL, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vc := &VSphereConnection{Client: c.Client} | ||||
|  | ||||
| 	dc, err := GetDatacenter(ctx, vc, TestDefaultDatacenter) | ||||
| 	if err != nil { | ||||
| 		t.Error(err) | ||||
| 	} | ||||
|  | ||||
| 	folders, err := dc.Folders(ctx) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	folder, err := dc.GetFolderByPath(ctx, folders.VmFolder.InventoryPath) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vms, err := folder.GetVirtualMachines(ctx) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	if len(vms) == 0 { | ||||
| 		t.Fatal("no VMs") | ||||
| 	} | ||||
|  | ||||
| 	for _, vm := range vms { | ||||
| 		all, err := vm.GetAllAccessibleDatastores(ctx) | ||||
| 		if err != nil { | ||||
| 			t.Error(err) | ||||
| 		} | ||||
| 		if len(all) == 0 { | ||||
| 			t.Error("no accessible datastores") | ||||
| 		} | ||||
|  | ||||
| 		_, err = vm.GetResourcePool(ctx) | ||||
| 		if err != nil { | ||||
| 			t.Error(err) | ||||
| 		} | ||||
|  | ||||
| 		diskPath, err := vm.GetVirtualDiskPath(ctx) | ||||
| 		if err != nil { | ||||
| 			t.Error(err) | ||||
| 		} | ||||
|  | ||||
| 		options := &VolumeOptions{SCSIControllerType: PVSCSIControllerType} | ||||
|  | ||||
| 		for _, expect := range []bool{true, false} { | ||||
| 			attached, err := vm.IsDiskAttached(ctx, diskPath) | ||||
| 			if err != nil { | ||||
| 				t.Error(err) | ||||
| 			} | ||||
|  | ||||
| 			if attached != expect { | ||||
| 				t.Errorf("attached=%t, expected=%t", attached, expect) | ||||
| 			} | ||||
|  | ||||
| 			uuid, err := vm.AttachDisk(ctx, diskPath, options) | ||||
| 			if err != nil { | ||||
| 				t.Error(err) | ||||
| 			} | ||||
| 			if uuid == "" { | ||||
| 				t.Error("missing uuid") | ||||
| 			} | ||||
|  | ||||
| 			err = vm.DetachDisk(ctx, diskPath) | ||||
| 			if err != nil { | ||||
| 				t.Error(err) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for _, turnOff := range []bool{true, false} { | ||||
| 			// Power off the VM first to verify that Exists still returns true for a powered-off VM | ||||
| 			if turnOff { | ||||
| 				_, _ = vm.PowerOff(ctx) | ||||
| 			} | ||||
|  | ||||
| 			exist, err := vm.Exists(ctx) | ||||
| 			if err != nil { | ||||
| 				t.Error(err) | ||||
| 			} | ||||
| 			if !exist { | ||||
| 				t.Errorf("exist=%t, expected=%t", exist, true) | ||||
| 			} | ||||
|  | ||||
| 			// Turn back on | ||||
| 			if turnOff { | ||||
| 				_, _ = vm.PowerOn(ctx) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for _, expect := range []bool{true, false} { | ||||
| 			active, err := vm.IsActive(ctx) | ||||
| 			if err != nil { | ||||
| 				t.Error(err) | ||||
| 			} | ||||
|  | ||||
| 			if active != expect { | ||||
| 				t.Errorf("active=%t, expected=%t", active, expect) | ||||
| 			} | ||||
|  | ||||
| 			if expect { | ||||
| 				// Expecting to hit the error path since the VM is still powered on | ||||
| 				err = vm.DeleteVM(ctx) | ||||
| 				if err == nil { | ||||
| 					t.Error("expected error") | ||||
| 				} | ||||
| 				_, _ = vm.PowerOff(ctx) | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			// Should be able to delete now that VM power is off | ||||
| 			err = vm.DeleteVM(ctx) | ||||
| 			if err != nil { | ||||
| 				t.Error(err) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		// Expecting the Exists func to return an error once the VM has been deleted | ||||
| 		_, err = vm.Exists(ctx) | ||||
| 		if err == nil { | ||||
| 			t.Error("expected error") | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
| @@ -1,27 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| ) | ||||
|  | ||||
| // VMOptions provides helper objects for provisioning a volume with an SPBM policy | ||||
| type VMOptions struct { | ||||
| 	VMFolder       *Folder | ||||
| 	VMResourcePool *object.ResourcePool | ||||
| } | ||||
| @@ -1,111 +0,0 @@ | ||||
| /* | ||||
| Copyright 2016 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
|  | ||||
| 	"k8s.io/api/core/v1" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| // VolumeOptions specifies various options for a volume. | ||||
| type VolumeOptions struct { | ||||
| 	CapacityKB             int | ||||
| 	Tags                   map[string]string | ||||
| 	Name                   string | ||||
| 	DiskFormat             string | ||||
| 	Datastore              string | ||||
| 	VSANStorageProfileData string | ||||
| 	StoragePolicyName      string | ||||
| 	StoragePolicyID        string | ||||
| 	SCSIControllerType     string | ||||
| 	Zone                   []string | ||||
| 	SelectedNode           *v1.Node | ||||
| } | ||||
|  | ||||
| var ( | ||||
| 	// DiskFormatValidType specifies the valid disk formats | ||||
| 	DiskFormatValidType = map[string]string{ | ||||
| 		ThinDiskType: ThinDiskType, | ||||
| 		strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType, | ||||
| 		strings.ToLower(ZeroedThickDiskType):      PreallocatedDiskType, | ||||
| 	} | ||||
| 	// SCSIControllerValidType specifies the supported SCSI controllers | ||||
| 	SCSIControllerValidType = []string{LSILogicControllerType, LSILogicSASControllerType, PVSCSIControllerType} | ||||
| ) | ||||
|  | ||||
| // DiskformatValidOptions returns a comma-separated list of the valid disk formats | ||||
| func DiskformatValidOptions() string { | ||||
| 	validopts := "" | ||||
| 	for diskformat := range DiskFormatValidType { | ||||
| 		validopts += diskformat + ", " | ||||
| 	} | ||||
| 	validopts = strings.TrimSuffix(validopts, ", ") | ||||
| 	return validopts | ||||
| } | ||||
|  | ||||
| // CheckDiskFormatSupported checks if the diskFormat is valid | ||||
| func CheckDiskFormatSupported(diskFormat string) bool { | ||||
| 	if DiskFormatValidType[diskFormat] == "" { | ||||
| 		klog.Errorf("Not a valid Disk Format. Valid options are %+q", DiskformatValidOptions()) | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // SCSIControllerTypeValidOptions generates valid options for SCSIControllerType | ||||
| func SCSIControllerTypeValidOptions() string { | ||||
| 	validopts := "" | ||||
| 	for _, controllerType := range SCSIControllerValidType { | ||||
| 		validopts += (controllerType + ", ") | ||||
| 	} | ||||
| 	validopts = strings.TrimSuffix(validopts, ", ") | ||||
| 	return validopts | ||||
| } | ||||
|  | ||||
| // CheckControllerSupported checks if the given controller type is valid | ||||
| func CheckControllerSupported(ctrlType string) bool { | ||||
| 	for _, c := range SCSIControllerValidType { | ||||
| 		if ctrlType == c { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	klog.Errorf("Not a valid SCSI Controller Type. Valid options are %q", SCSIControllerTypeValidOptions()) | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // VerifyVolumeOptions checks that volumeOptions.SCSIControllerType is a valid controller type and that the requested disk format is supported | ||||
| func (volumeOptions VolumeOptions) VerifyVolumeOptions() error { | ||||
| 	// Validate only if SCSIControllerType is set by user. | ||||
| 	// Default value is set later in virtualDiskManager.Create and vmDiskManager.Create | ||||
| 	if volumeOptions.SCSIControllerType != "" { | ||||
| 		isValid := CheckControllerSupported(volumeOptions.SCSIControllerType) | ||||
| 		if !isValid { | ||||
| 			return fmt.Errorf("invalid scsiControllerType: %s", volumeOptions.SCSIControllerType) | ||||
| 		} | ||||
| 	} | ||||
| 	// ThinDiskType is the default, so skip the validation. | ||||
| 	if volumeOptions.DiskFormat != ThinDiskType { | ||||
| 		isValid := CheckDiskFormatSupported(volumeOptions.DiskFormat) | ||||
| 		if !isValid { | ||||
| 			return fmt.Errorf("invalid diskFormat: %s", volumeOptions.DiskFormat) | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
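
For illustration only, not part of the removed file: a minimal sketch of how a caller might populate VolumeOptions and run the validation above. The field values are made up; only the vclib constants already defined in this file (ThinDiskType, PVSCSIControllerType) are assumed.

```go
// validateExampleVolumeOptions is a hypothetical helper showing how
// VerifyVolumeOptions is typically driven; the values are invented.
func validateExampleVolumeOptions() error {
	opts := VolumeOptions{
		CapacityKB:         1 * 1024 * 1024, // 1 GiB expressed in KB
		Name:               "example-volume",
		DiskFormat:         ThinDiskType,
		SCSIControllerType: PVSCSIControllerType,
	}
	// Returns an error for an unsupported SCSIControllerType or DiskFormat.
	return opts.VerifyVolumeOptions()
}
```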
| @@ -1,192 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vclib | ||||
|  | ||||
| import ( | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/component-base/metrics" | ||||
| 	"k8s.io/component-base/metrics/legacyregistry" | ||||
| ) | ||||
|  | ||||
| // Cloud Provider API constants | ||||
| const ( | ||||
| 	APICreateVolume = "CreateVolume" | ||||
| 	APIDeleteVolume = "DeleteVolume" | ||||
| 	APIAttachVolume = "AttachVolume" | ||||
| 	APIDetachVolume = "DetachVolume" | ||||
| ) | ||||
|  | ||||
| // Cloud Provider Operation constants | ||||
| const ( | ||||
| 	OperationDeleteVolume                  = "DeleteVolumeOperation" | ||||
| 	OperationAttachVolume                  = "AttachVolumeOperation" | ||||
| 	OperationDetachVolume                  = "DetachVolumeOperation" | ||||
| 	OperationDiskIsAttached                = "DiskIsAttachedOperation" | ||||
| 	OperationDisksAreAttached              = "DisksAreAttachedOperation" | ||||
| 	OperationCreateVolume                  = "CreateVolumeOperation" | ||||
| 	OperationCreateVolumeWithPolicy        = "CreateVolumeWithPolicyOperation" | ||||
| 	OperationCreateVolumeWithRawVSANPolicy = "CreateVolumeWithRawVSANPolicyOperation" | ||||
| ) | ||||
|  | ||||
| var vCenterMetric *vcenterMetric | ||||
|  | ||||
| func init() { | ||||
| 	vCenterMetric = &vcenterMetric{ | ||||
| 		vCenterInfos: make(map[string]types.AboutInfo), | ||||
| 		mux:          sync.Mutex{}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // vsphereAPIMetric is for recording latency of Single API Call. | ||||
| var vsphereAPIMetric = metrics.NewHistogramVec( | ||||
| 	&metrics.HistogramOpts{ | ||||
| 		Name:           "cloudprovider_vsphere_api_request_duration_seconds", | ||||
| 		Help:           "Latency of vsphere api call", | ||||
| 		StabilityLevel: metrics.ALPHA, | ||||
| 	}, | ||||
| 	[]string{"request"}, | ||||
| ) | ||||
|  | ||||
| var vsphereAPIErrorMetric = metrics.NewCounterVec( | ||||
| 	&metrics.CounterOpts{ | ||||
| 		Name:           "cloudprovider_vsphere_api_request_errors", | ||||
| 		Help:           "vsphere Api errors", | ||||
| 		StabilityLevel: metrics.ALPHA, | ||||
| 	}, | ||||
| 	[]string{"request"}, | ||||
| ) | ||||
|  | ||||
| // vsphereOperationMetric is for recording latency of vSphere Operation which invokes multiple APIs to get the task done. | ||||
| var vsphereOperationMetric = metrics.NewHistogramVec( | ||||
| 	&metrics.HistogramOpts{ | ||||
| 		Name:           "cloudprovider_vsphere_operation_duration_seconds", | ||||
| 		Help:           "Latency of vsphere operation call", | ||||
| 		StabilityLevel: metrics.ALPHA, | ||||
| 	}, | ||||
| 	[]string{"operation"}, | ||||
| ) | ||||
|  | ||||
| var vsphereOperationErrorMetric = metrics.NewCounterVec( | ||||
| 	&metrics.CounterOpts{ | ||||
| 		Name:           "cloudprovider_vsphere_operation_errors", | ||||
| 		Help:           "vsphere operation errors", | ||||
| 		StabilityLevel: metrics.ALPHA, | ||||
| 	}, | ||||
| 	[]string{"operation"}, | ||||
| ) | ||||
|  | ||||
| var vsphereVersion = metrics.NewDesc( | ||||
| 	"cloudprovider_vsphere_vcenter_versions", | ||||
| 	"Versions for connected vSphere vCenters", | ||||
| 	[]string{"hostname", "version", "build"}, nil, | ||||
| 	metrics.ALPHA, "") | ||||
|  | ||||
| // RegisterMetrics registers all the API and Operation metrics | ||||
| func RegisterMetrics() { | ||||
| 	legacyregistry.MustRegister(vsphereAPIMetric) | ||||
| 	legacyregistry.MustRegister(vsphereAPIErrorMetric) | ||||
| 	legacyregistry.MustRegister(vsphereOperationMetric) | ||||
| 	legacyregistry.MustRegister(vsphereOperationErrorMetric) | ||||
| 	legacyregistry.CustomMustRegister(vCenterMetric) | ||||
| } | ||||
|  | ||||
| type vcenterMetric struct { | ||||
| 	metrics.BaseStableCollector | ||||
|  | ||||
| 	mux          sync.Mutex | ||||
| 	vCenterInfos map[string]types.AboutInfo | ||||
| } | ||||
|  | ||||
| func (collector *vcenterMetric) DescribeWithStability(ch chan<- *metrics.Desc) { | ||||
| 	ch <- vsphereVersion | ||||
| } | ||||
|  | ||||
| func (collector *vcenterMetric) CollectWithStability(ch chan<- metrics.Metric) { | ||||
| 	collector.mux.Lock() | ||||
| 	defer collector.mux.Unlock() | ||||
|  | ||||
| 	for vCenter, info := range collector.vCenterInfos { | ||||
| 		ch <- metrics.NewLazyMetricWithTimestamp(time.Now(), | ||||
| 			metrics.NewLazyConstMetric(vsphereVersion, | ||||
| 				metrics.GaugeValue, | ||||
| 				float64(1), | ||||
| 				vCenter, | ||||
| 				info.Version, | ||||
| 				info.Build)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (collector *vcenterMetric) setAbout(server string, info types.AboutInfo) { | ||||
| 	collector.mux.Lock() | ||||
| 	defer collector.mux.Unlock() | ||||
| 	collector.vCenterInfos[server] = info | ||||
| } | ||||
|  | ||||
| func setVCenterInfoMetric(connection *VSphereConnection) { | ||||
| 	vCenterMetric.setAbout(connection.Hostname, connection.Client.ServiceContent.About) | ||||
| } | ||||
|  | ||||
| // RecordvSphereMetric records the vSphere API and Operation metrics | ||||
| func RecordvSphereMetric(actionName string, requestTime time.Time, err error) { | ||||
| 	switch actionName { | ||||
| 	case APICreateVolume, APIDeleteVolume, APIAttachVolume, APIDetachVolume: | ||||
| 		recordvSphereAPIMetric(actionName, requestTime, err) | ||||
| 	default: | ||||
| 		recordvSphereOperationMetric(actionName, requestTime, err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func recordvSphereAPIMetric(actionName string, requestTime time.Time, err error) { | ||||
| 	if err != nil { | ||||
| 		vsphereAPIErrorMetric.With(metrics.Labels{"request": actionName}).Inc() | ||||
| 	} else { | ||||
| 		vsphereAPIMetric.With(metrics.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func recordvSphereOperationMetric(actionName string, requestTime time.Time, err error) { | ||||
| 	if err != nil { | ||||
| 		vsphereOperationErrorMetric.With(metrics.Labels{"operation": actionName}).Inc() | ||||
| 	} else { | ||||
| 		vsphereOperationMetric.With(metrics.Labels{"operation": actionName}).Observe(calculateTimeTaken(requestTime)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RecordCreateVolumeMetric records the Create Volume metric | ||||
| func RecordCreateVolumeMetric(volumeOptions *VolumeOptions, requestTime time.Time, err error) { | ||||
| 	var actionName string | ||||
| 	if volumeOptions.StoragePolicyName != "" { | ||||
| 		actionName = OperationCreateVolumeWithPolicy | ||||
| 	} else if volumeOptions.VSANStorageProfileData != "" { | ||||
| 		actionName = OperationCreateVolumeWithRawVSANPolicy | ||||
| 	} else { | ||||
| 		actionName = OperationCreateVolume | ||||
| 	} | ||||
| 	RecordvSphereMetric(actionName, requestTime, err) | ||||
| } | ||||
|  | ||||
| func calculateTimeTaken(requestBeginTime time.Time) (timeTaken float64) { | ||||
| 	if !requestBeginTime.IsZero() { | ||||
| 		timeTaken = time.Since(requestBeginTime).Seconds() | ||||
| 	} else { | ||||
| 		timeTaken = 0 | ||||
| 	} | ||||
| 	return timeTaken | ||||
| } | ||||
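
A minimal usage sketch, not part of the removed code: the intended pattern is to capture the start time before the vSphere call and pass it to RecordvSphereMetric together with the resulting error. attachWithMetrics and its arguments are hypothetical; AttachDisk, VolumeOptions and APIAttachVolume are the vclib names above, and the snippet assumes the context and time packages are imported.

```go
// Hypothetical wrapper illustrating the metrics-recording pattern.
func attachWithMetrics(ctx context.Context, vm *VirtualMachine, diskPath string, opts *VolumeOptions) (string, error) {
	requestTime := time.Now()
	diskUUID, err := vm.AttachDisk(ctx, diskPath, opts)
	// Records a latency sample on success or bumps the error counter on failure.
	RecordvSphereMetric(APIAttachVolume, requestTime, err)
	return diskUUID, err
}
```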
										
											
File diff suppressed because it is too large

File diff suppressed because it is too large
							| @@ -1,880 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"math/rand" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/find" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/property" | ||||
| 	"github.com/vmware/govmomi/vim25" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/soap" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/klog/v2" | ||||
|  | ||||
| 	k8stypes "k8s.io/apimachinery/pkg/types" | ||||
| 	"k8s.io/apimachinery/pkg/util/version" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib/diskmanagers" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	DatastoreProperty     = "datastore" | ||||
| 	DatastoreInfoProperty = "info" | ||||
| 	DatastoreNameProperty = "name" | ||||
| 	Folder                = "Folder" | ||||
| 	VirtualMachine        = "VirtualMachine" | ||||
| 	DummyDiskName         = "kube-dummyDisk.vmdk" | ||||
| 	ProviderPrefix        = "vsphere://" | ||||
| 	vSphereConfFileEnvVar = "VSPHERE_CONF_FILE" | ||||
| 	UUIDPrefix            = "VMware-" | ||||
| ) | ||||
|  | ||||
| // GetVSphere reads the vSphere configuration from the system environment and constructs a VSphere object | ||||
| func GetVSphere() (*VSphere, error) { | ||||
| 	cfg, err := getVSphereConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	vs, err := newControllerNode(*cfg) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return vs, nil | ||||
| } | ||||
|  | ||||
| func getVSphereConfig() (*VSphereConfig, error) { | ||||
| 	confFileLocation := os.Getenv(vSphereConfFileEnvVar) | ||||
| 	if confFileLocation == "" { | ||||
| 		return nil, fmt.Errorf("environment variable %s is not set", vSphereConfFileEnvVar) | ||||
| 	} | ||||
| 	confFile, err := os.Open(confFileLocation) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer func() { | ||||
| 		if err := confFile.Close(); err != nil { | ||||
| 			klog.Errorf("failed to close config file: %v", err) | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	cfg, err := readConfig(confFile) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &cfg, nil | ||||
| } | ||||
|  | ||||
| // Returns the accessible datastores for the given node VM. | ||||
| func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { | ||||
| 	accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx) | ||||
| 	if err != nil { | ||||
| 		// Check if the node VM is not found which indicates that the node info in the node manager is stale. | ||||
| 		// If so, rediscover the node and retry. | ||||
| 		if vclib.IsManagedObjectNotFoundError(err) { | ||||
| 			klog.V(4).Infof("error %q ManagedObjectNotFound for node %q. Rediscovering...", err, nodeVmDetail.NodeName) | ||||
| 			err = nodeManager.RediscoverNode(convertToK8sType(nodeVmDetail.NodeName)) | ||||
| 			if err == nil { | ||||
| 				klog.V(4).Infof("Discovered node %s successfully", nodeVmDetail.NodeName) | ||||
| 				nodeInfo, err := nodeManager.GetNodeInfo(convertToK8sType(nodeVmDetail.NodeName)) | ||||
| 				if err != nil { | ||||
| 					klog.V(4).Infof("error %q getting node info for node %+v", err, nodeVmDetail) | ||||
| 					return nil, err | ||||
| 				} | ||||
|  | ||||
| 				accessibleDatastores, err = nodeInfo.vm.GetAllAccessibleDatastores(ctx) | ||||
| 				if err != nil { | ||||
| 					klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) | ||||
| 					return nil, err | ||||
| 				} | ||||
| 			} else { | ||||
| 				klog.V(4).Infof("error %q rediscovering node %+v", err, nodeVmDetail) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 		} else { | ||||
| 			klog.V(4).Infof("error %q getting accessible datastores for node %+v", err, nodeVmDetail) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	return accessibleDatastores, nil | ||||
| } | ||||
|  | ||||
| // getSharedDatastoresInK8SCluster returns the datastores that are accessible from every node VM in the Kubernetes cluster. | ||||
| func getSharedDatastoresInK8SCluster(ctx context.Context, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) { | ||||
| 	nodeVmDetails, err := nodeManager.GetNodeDetails() | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error while obtaining Kubernetes node details. error: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	if len(nodeVmDetails) == 0 { | ||||
| 		msg := fmt.Sprintf("Kubernetes node details list is empty. nodeVmDetails: %+v", nodeVmDetails) | ||||
| 		klog.Error(msg) | ||||
| 		return nil, errors.New(msg) | ||||
| 	} | ||||
| 	var sharedDatastores []*vclib.DatastoreInfo | ||||
| 	for _, nodeVmDetail := range nodeVmDetails { | ||||
| 		klog.V(9).Infof("Getting accessible datastores for node %s", nodeVmDetail.NodeName) | ||||
| 		accessibleDatastores, err := getAccessibleDatastores(ctx, &nodeVmDetail, nodeManager) | ||||
| 		if err != nil { | ||||
| 			if err == vclib.ErrNoVMFound { | ||||
| 				klog.V(9).Infof("Got NoVMFound error for node %s", nodeVmDetail.NodeName) | ||||
| 				continue | ||||
| 			} | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		if len(sharedDatastores) == 0 { | ||||
| 			sharedDatastores = accessibleDatastores | ||||
| 		} else { | ||||
| 			sharedDatastores = intersect(sharedDatastores, accessibleDatastores) | ||||
| 			if len(sharedDatastores) == 0 { | ||||
| 				return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster for nodeVmDetails: %+v", nodeVmDetails) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	klog.V(9).Infof("sharedDatastores : %+v", sharedDatastores) | ||||
| 	return sharedDatastores, nil | ||||
| } | ||||
|  | ||||
| func intersect(list1 []*vclib.DatastoreInfo, list2 []*vclib.DatastoreInfo) []*vclib.DatastoreInfo { | ||||
| 	klog.V(9).Infof("list1: %+v", list1) | ||||
| 	klog.V(9).Infof("list2: %+v", list2) | ||||
| 	var sharedDs []*vclib.DatastoreInfo | ||||
| 	for _, val1 := range list1 { | ||||
| 		// Check if val1 is found in list2 | ||||
| 		for _, val2 := range list2 { | ||||
| 			// Intersection is performed based on the datastoreUrl as this uniquely identifies the datastore. | ||||
| 			if val1.Info.Url == val2.Info.Url { | ||||
| 				sharedDs = append(sharedDs, val1) | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return sharedDs | ||||
| } | ||||
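
Not part of the original file: a small, hypothetical sketch of how intersect keys on Info.Url, assuming the vclib.DatastoreInfo and types.DatastoreInfo shapes used elsewhere in this file. The datastore URLs are invented.

```go
// exampleIntersect builds two made-up datastore lists that share one URL.
func exampleIntersect() []*vclib.DatastoreInfo {
	ds := func(url string) *vclib.DatastoreInfo {
		return &vclib.DatastoreInfo{Info: &types.DatastoreInfo{Url: url}}
	}
	listA := []*vclib.DatastoreInfo{ds("ds:///vmfs/volumes/ds-1/"), ds("ds:///vmfs/volumes/ds-2/")}
	listB := []*vclib.DatastoreInfo{ds("ds:///vmfs/volumes/ds-2/"), ds("ds:///vmfs/volumes/ds-3/")}
	// Only the entry whose Info.Url is "ds:///vmfs/volumes/ds-2/" survives the intersection.
	return intersect(listA, listB)
}
```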
|  | ||||
| // getMostFreeDatastore gets the best fit compatible datastore by free space. | ||||
| func getMostFreeDatastore(ctx context.Context, client *vim25.Client, dsInfoList []*vclib.DatastoreInfo) (*vclib.DatastoreInfo, error) { | ||||
| 	// Guard against an empty list so the index lookup below cannot panic. | ||||
| 	if len(dsInfoList) == 0 { | ||||
| 		return nil, errors.New("no compatible datastores to choose from") | ||||
| 	} | ||||
| 	curMax := int64(-1) | ||||
| 	var index int | ||||
| 	for i, dsInfo := range dsInfoList { | ||||
| 		dsFreeSpace := dsInfo.Info.GetDatastoreInfo().FreeSpace | ||||
| 		if dsFreeSpace > curMax { | ||||
| 			curMax = dsFreeSpace | ||||
| 			index = i | ||||
| 		} | ||||
| 	} | ||||
| 	return dsInfoList[index], nil | ||||
| } | ||||
|  | ||||
| func getPbmCompatibleDatastore(ctx context.Context, vcClient *vim25.Client, storagePolicyName string, nodeManager *NodeManager) (*vclib.DatastoreInfo, error) { | ||||
| 	pbmClient, err := vclib.NewPbmClient(ctx, vcClient) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	sharedDs, err := getSharedDatastoresInK8SCluster(ctx, nodeManager) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get shared datastores. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if len(sharedDs) == 0 { | ||||
| 		msg := "No shared datastores found in the endpoint virtual center" | ||||
| 		klog.Error(msg) | ||||
| 		return nil, errors.New(msg) | ||||
| 	} | ||||
| 	compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, sharedDs) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", | ||||
| 			sharedDs, storagePolicyID, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) | ||||
| 	datastore, err := getMostFreeDatastore(ctx, vcClient, compatibleDatastores) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(4).Infof("Most free datastore : %+s", datastore.Info.Name) | ||||
| 	return datastore, err | ||||
| } | ||||
|  | ||||
| func getDatastoresForZone(ctx context.Context, nodeManager *NodeManager, selectedZones []string) ([]*vclib.DatastoreInfo, error) { | ||||
|  | ||||
| 	var sharedDatastores []*vclib.DatastoreInfo | ||||
|  | ||||
| 	for _, zone := range selectedZones { | ||||
| 		var sharedDatastoresPerZone []*vclib.DatastoreInfo | ||||
| 		hosts, err := nodeManager.GetHostsInZone(ctx, zone) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		klog.V(4).Infof("Hosts in zone %s : %s", zone, hosts) | ||||
|  | ||||
| 		for _, host := range hosts { | ||||
| 			var hostSystemMo mo.HostSystem | ||||
| 			err = host.Properties(ctx, host.Reference(), []string{"datastore"}, &hostSystemMo) | ||||
| 			if err != nil { | ||||
| 				klog.Errorf("Failed to get datastore property for host %s. err : %+v", host, err) | ||||
| 				return nil, err | ||||
| 			} | ||||
|  | ||||
| 			klog.V(4).Infof("Datastores mounted on host %s : %s", host, hostSystemMo.Datastore) | ||||
| 			var dsRefList []types.ManagedObjectReference | ||||
| 			for _, dsRef := range hostSystemMo.Datastore { | ||||
| 				dsRefList = append(dsRefList, dsRef) | ||||
| 			} | ||||
|  | ||||
| 			var dsMoList []mo.Datastore | ||||
| 			pc := property.DefaultCollector(host.Client()) | ||||
| 			properties := []string{DatastoreInfoProperty, DatastoreNameProperty} | ||||
| 			err = pc.Retrieve(ctx, dsRefList, properties, &dsMoList) | ||||
| 			if err != nil { | ||||
| 				klog.Errorf("Failed to get Datastore managed objects from datastore objects."+ | ||||
| 					" dsObjList: %+v, properties: %+v, err: %+v", dsRefList, properties, err) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			klog.V(9).Infof("Datastore mo details: %+v", dsMoList) | ||||
|  | ||||
| 			// find the Datacenter parent for this host | ||||
| 			mes, err := mo.Ancestors(ctx, host.Client(), pc.Reference(), host.Reference()) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			var dcMoref *types.ManagedObjectReference | ||||
| 			for i := len(mes) - 1; i > 0; i-- { | ||||
| 				if mes[i].Self.Type == "Datacenter" { | ||||
| 					dcMoref = &mes[i].Self | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			if dcMoref == nil { | ||||
| 				return nil, fmt.Errorf("failed to find the Datacenter of host %s", host) | ||||
| 			} | ||||
|  | ||||
| 			dc := object.NewDatacenter(host.Client(), *dcMoref) | ||||
| 			finder := find.NewFinder(host.Client(), false) | ||||
| 			finder.SetDatacenter(dc) | ||||
| 			var dsObjList []*vclib.DatastoreInfo | ||||
| 			for _, dsMo := range dsMoList { | ||||
| 				// use the finder so that InventoryPath is set correctly in dsObj | ||||
| 				dsObj, err := finder.Datastore(ctx, dsMo.Name) | ||||
| 				if err != nil { | ||||
| 					klog.Errorf("Failed to find datastore named %s in datacenter %s", dsMo.Name, dc) | ||||
| 					return nil, err | ||||
| 				} | ||||
| 				dsObjList = append(dsObjList, | ||||
| 					&vclib.DatastoreInfo{ | ||||
| 						Datastore: &vclib.Datastore{Datastore: dsObj, | ||||
| 							Datacenter: &vclib.Datacenter{Datacenter: dc}}, | ||||
| 						Info: dsMo.Info.GetDatastoreInfo()}) | ||||
| 			} | ||||
|  | ||||
| 			klog.V(9).Infof("DatastoreInfo details : %s", dsObjList) | ||||
|  | ||||
| 			if len(sharedDatastoresPerZone) == 0 { | ||||
| 				sharedDatastoresPerZone = dsObjList | ||||
| 			} else { | ||||
| 				sharedDatastoresPerZone = intersect(sharedDatastoresPerZone, dsObjList) | ||||
| 				if len(sharedDatastoresPerZone) == 0 { | ||||
| 					klog.V(4).Infof("No shared datastores found among hosts %s", hosts) | ||||
| 					return nil, fmt.Errorf("No matching datastores found in the kubernetes cluster for zone %s", zone) | ||||
| 				} | ||||
| 			} | ||||
| 			klog.V(9).Infof("Shared datastore list after processing host %s : %s", host, sharedDatastoresPerZone) | ||||
| 		} | ||||
| 		klog.V(4).Infof("Shared datastore per zone %s is %s", zone, sharedDatastoresPerZone) | ||||
| 		if len(sharedDatastores) == 0 { | ||||
| 			sharedDatastores = sharedDatastoresPerZone | ||||
| 		} else { | ||||
| 			sharedDatastores = intersect(sharedDatastores, sharedDatastoresPerZone) | ||||
| 			if len(sharedDatastores) == 0 { | ||||
| 				return nil, fmt.Errorf("No matching datastores found in the kubernetes cluster across zones %s", selectedZones) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	klog.V(1).Infof("Returning selected datastores : %s", sharedDatastores) | ||||
| 	return sharedDatastores, nil | ||||
| } | ||||
|  | ||||
| func getPbmCompatibleZonedDatastore(ctx context.Context, vcClient *vim25.Client, storagePolicyName string, zonedDatastores []*vclib.DatastoreInfo) (*vclib.DatastoreInfo, error) { | ||||
| 	pbmClient, err := vclib.NewPbmClient(ctx, vcClient) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, zonedDatastores) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", | ||||
| 			zonedDatastores, storagePolicyID, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(9).Infof("compatibleDatastores : %+v", compatibleDatastores) | ||||
| 	datastore, err := getMostFreeDatastore(ctx, vcClient, compatibleDatastores) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get most free datastore from compatible datastores: %+v. err: %+v", compatibleDatastores, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	klog.V(4).Infof("Most free datastore : %+s", datastore.Info.Name) | ||||
| 	return datastore, err | ||||
| } | ||||
|  | ||||
| func (vs *VSphere) setVMOptions(ctx context.Context, connection *vclib.VSphereConnection, ds *vclib.Datastore) (*vclib.VMOptions, error) { | ||||
| 	var vmOptions vclib.VMOptions | ||||
| 	dsHosts, err := ds.GetDatastoreHostMounts(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get datastore host mounts for %v: %+v", ds, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	// pick a host at random to use for Volume creation | ||||
| 	dsHostMoref := dsHosts[rand.New(rand.NewSource(time.Now().UnixNano())).Intn(len(dsHosts))] | ||||
| 	dummyVMHost := object.NewHostSystem(connection.Client, dsHostMoref) | ||||
| 	resourcePool, err := dummyVMHost.ResourcePool(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get resource pool from host %v", dummyVMHost) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	folder, err := ds.Datacenter.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	vmOptions.VMFolder = folder | ||||
| 	vmOptions.VMResourcePool = resourcePool | ||||
| 	return &vmOptions, nil | ||||
| } | ||||
|  | ||||
| // cleanUpDummyVMs is a background routine responsible for deleting stale dummy VMs. | ||||
| func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { | ||||
| 	// Create context | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	for { | ||||
| 		time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute) | ||||
| 		datacenters, err := vs.GetWorkspaceDatacenters(ctx) | ||||
| 		if err != nil { | ||||
| 			klog.V(4).Infof("Failed to get datacenters from VC. err: %+v", err) | ||||
| 			continue | ||||
| 		} | ||||
| 		// Clean up dummy VMs in each datacenter | ||||
| 		for _, dc := range datacenters { | ||||
| 			// Get the folder reference for global working directory where the dummy VM needs to be created. | ||||
| 			vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder) | ||||
| 			if err != nil { | ||||
| 				klog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Workspace.Folder, err) | ||||
| 				continue | ||||
| 			} | ||||
| 			// A write lock is acquired to make sure the cleanUp routine doesn't delete any VMs created by ongoing PVC requests. | ||||
| 			cleanUpDummyVMs := func() { | ||||
| 				cleanUpDummyVMLock.Lock() | ||||
| 				defer cleanUpDummyVMLock.Unlock() | ||||
| 				err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder) | ||||
| 				if err != nil { | ||||
| 					klog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Workspace.Folder, err) | ||||
| 				} | ||||
| 			} | ||||
| 			cleanUpDummyVMs() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // getcanonicalVolumePath returns the canonical volume path for the given volume path. | ||||
| // Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk | ||||
| // Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path. | ||||
| func getcanonicalVolumePath(ctx context.Context, dc *vclib.Datacenter, volumePath string) (string, error) { | ||||
| 	var folderID string | ||||
| 	var folderExists bool | ||||
| 	canonicalVolumePath := volumePath | ||||
| 	dsPathObj, err := vclib.GetDatastorePathObjFromVMDiskPath(volumePath) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/") | ||||
| 	if len(dsPath) <= 1 { | ||||
| 		return canonicalVolumePath, nil | ||||
| 	} | ||||
| 	datastore := dsPathObj.Datastore | ||||
| 	dsFolder := dsPath[0] | ||||
| 	folderNameIDMap, datastoreExists := datastoreFolderIDMap[datastore] | ||||
| 	if datastoreExists { | ||||
| 		folderID, folderExists = folderNameIDMap[dsFolder] | ||||
| 	} | ||||
| 	// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap | ||||
| 	if !datastoreExists || !folderExists { | ||||
| 		if !vclib.IsValidUUID(dsFolder) { | ||||
| 			dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName | ||||
| 			// Query a non-existent dummy disk on the datastore folder. | ||||
| 			// The query is expected to fail; the FileNotFound fault carries a path containing the folder ID, which is extracted below. | ||||
| 			_, err := dc.GetVirtualDiskPage83Data(ctx, dummyDiskVolPath) | ||||
| 			canonicalVolumePath, err = getPathFromFileNotFound(err) | ||||
| 			if err != nil { | ||||
| 				return "", fmt.Errorf("failed to get path from dummy request: %v", err) | ||||
| 			} | ||||
| 		} | ||||
| 		diskPath := vclib.GetPathFromVMDiskPath(canonicalVolumePath) | ||||
| 		if diskPath == "" { | ||||
| 			return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath) | ||||
| 		} | ||||
| 		folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0] | ||||
| 		setdatastoreFolderIDMap(datastoreFolderIDMap, datastore, dsFolder, folderID) | ||||
| 	} | ||||
| 	canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1) | ||||
| 	return canonicalVolumePath, nil | ||||
| } | ||||
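
A worked instance of the final rewrite step, not part of the removed file, reusing the example paths from the Example1 comment above getcanonicalVolumePath; the folder ID is the one from that example.

```go
// exampleCanonicalRewrite shows the folder-name-to-folder-ID substitution performed above.
func exampleCanonicalRewrite() string {
	volumePath := "[vsanDatastore] kubevols/volume.vmdk"
	dsFolder := "kubevols"
	folderID := "25d8b159-948c-4b73-e499-02001ad1b044"
	// Same substitution getcanonicalVolumePath performs once the folder ID is known:
	// -> "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk"
	return strings.Replace(volumePath, dsFolder, folderID, 1)
}
```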
|  | ||||
| // getPathFromFileNotFound returns the path from a fileNotFound error | ||||
| func getPathFromFileNotFound(err error) (string, error) { | ||||
| 	if soap.IsSoapFault(err) { | ||||
| 		fault := soap.ToSoapFault(err) | ||||
| 		f, ok := fault.VimFault().(types.FileNotFound) | ||||
| 		if !ok { | ||||
| 			return "", fmt.Errorf("%v is not a FileNotFound error", err) | ||||
| 		} | ||||
| 		return f.File, nil | ||||
| 	} | ||||
| 	return "", fmt.Errorf("%v is not a soap fault", err) | ||||
| } | ||||
|  | ||||
| func setdatastoreFolderIDMap( | ||||
| 	datastoreFolderIDMap map[string]map[string]string, | ||||
| 	datastore string, | ||||
| 	folderName string, | ||||
| 	folderID string) { | ||||
| 	folderNameIDMap := datastoreFolderIDMap[datastore] | ||||
| 	if folderNameIDMap == nil { | ||||
| 		folderNameIDMap = make(map[string]string) | ||||
| 		datastoreFolderIDMap[datastore] = folderNameIDMap | ||||
| 	} | ||||
| 	folderNameIDMap[folderName] = folderID | ||||
| } | ||||
|  | ||||
| func convertVolPathToDevicePath(ctx context.Context, dc *vclib.Datacenter, volPath string) (string, error) { | ||||
| 	volPath = vclib.RemoveStorageClusterORFolderNameFromVDiskPath(volPath) | ||||
| 	// Get the canonical volume path for volPath. | ||||
| 	canonicalVolumePath, err := getcanonicalVolumePath(ctx, dc, volPath) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	// If the canonical volume path does not already carry the .vmdk extension, append it before returning. | ||||
| 	if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" { | ||||
| 		canonicalVolumePath += ".vmdk" | ||||
| 	} | ||||
| 	return canonicalVolumePath, nil | ||||
| } | ||||
|  | ||||
| // convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts them to canonical paths | ||||
| func (vs *VSphere) convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[k8stypes.NodeName][]string) (map[k8stypes.NodeName][]string, error) { | ||||
| 	vmVolumes := make(map[k8stypes.NodeName][]string) | ||||
| 	for nodeName, volPaths := range nodeVolumes { | ||||
| 		nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		for i, volPath := range volPaths { | ||||
| 			deviceVolPath, err := convertVolPathToDevicePath(ctx, nodeInfo.dataCenter, volPath) | ||||
| 			if err != nil { | ||||
| 				klog.Errorf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			volPaths[i] = deviceVolPath | ||||
| 		} | ||||
| 		vmVolumes[nodeName] = volPaths | ||||
| 	} | ||||
| 	return vmVolumes, nil | ||||
| } | ||||
|  | ||||
| // checkDiskAttached verifies that volumes are attached to the VMs that live in the same vCenter and Datacenter. | ||||
| // It returns the nodes, if any, whose VMs could not be found in that vCenter and Datacenter. | ||||
| func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeName, nodeVolumes map[k8stypes.NodeName][]string, attached map[string]map[string]bool, retry bool) ([]k8stypes.NodeName, error) { | ||||
| 	var nodesToRetry []k8stypes.NodeName | ||||
| 	var vmList []*vclib.VirtualMachine | ||||
| 	var nodeInfo NodeInfo | ||||
| 	var err error | ||||
|  | ||||
| 	for _, nodeName := range nodes { | ||||
| 		nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName) | ||||
| 		if err != nil { | ||||
| 			return nodesToRetry, err | ||||
| 		} | ||||
| 		vmList = append(vmList, nodeInfo.vm) | ||||
| 	} | ||||
|  | ||||
| 	// Making sure session is valid | ||||
| 	_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx) | ||||
| 	if err != nil { | ||||
| 		return nodesToRetry, err | ||||
| 	} | ||||
|  | ||||
| 	// If any of the nodes are not present, the property collector query will fail for the entire operation | ||||
| 	vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"}) | ||||
| 	if err != nil { | ||||
| 		if vclib.IsManagedObjectNotFoundError(err) && !retry { | ||||
| 			klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodes, vmList) | ||||
| 			// Property Collector Query failed | ||||
| 			// VerifyVolumePaths per VM | ||||
| 			for _, nodeName := range nodes { | ||||
| 				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) | ||||
| 				if err != nil { | ||||
| 					return nodesToRetry, err | ||||
| 				} | ||||
| 				devices, err := nodeInfo.vm.VirtualMachine.Device(ctx) | ||||
| 				if err != nil { | ||||
| 					if vclib.IsManagedObjectNotFoundError(err) { | ||||
| 						klog.V(4).Infof("checkDiskAttached: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) | ||||
| 						nodesToRetry = append(nodesToRetry, nodeName) | ||||
| 						continue | ||||
| 					} | ||||
| 					return nodesToRetry, err | ||||
| 				} | ||||
| 				klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) | ||||
| 				vs.vsphereVolumeMap.Add(nodeName, devices) | ||||
| 				vclib.VerifyVolumePathsForVMDevices(devices, nodeVolumes[nodeName], convertToString(nodeName), attached) | ||||
| 			} | ||||
| 		} | ||||
| 		return nodesToRetry, err | ||||
| 	} | ||||
|  | ||||
| 	vmMoMap := make(map[string]mo.VirtualMachine) | ||||
| 	for _, vmMo := range vmMoList { | ||||
| 		if vmMo.Config == nil { | ||||
| 			klog.Errorf("Config is not available for VM: %q", vmMo.Name) | ||||
| 			continue | ||||
| 		} | ||||
| 		klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid)) | ||||
| 		vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo | ||||
| 	} | ||||
|  | ||||
| 	klog.V(9).Infof("vmMoMap: +%v", vmMoMap) | ||||
|  | ||||
| 	for _, nodeName := range nodes { | ||||
| 		node, err := vs.nodeManager.GetNode(nodeName) | ||||
| 		if err != nil { | ||||
| 			return nodesToRetry, err | ||||
| 		} | ||||
| 		nodeUUID, err := GetNodeUUID(&node) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) | ||||
| 			return nodesToRetry, err | ||||
| 		} | ||||
| 		nodeUUID = strings.ToLower(nodeUUID) | ||||
| 		klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap) | ||||
| 		vmMo := vmMoMap[nodeUUID] | ||||
| 		vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device) | ||||
| 		vs.vsphereVolumeMap.Add(nodeName, vmDevices) | ||||
| 		vclib.VerifyVolumePathsForVMDevices(vmDevices, nodeVolumes[nodeName], convertToString(nodeName), attached) | ||||
| 	} | ||||
| 	return nodesToRetry, nil | ||||
| } | ||||
|  | ||||
| // BuildMissingVolumeNodeMap builds a map of volumes and nodes which are not known to the attach/detach controller. | ||||
| // There could be nodes in the cluster which do not have any pods with vSphere volumes running on them; | ||||
| // such nodes are not part of the disk verification check because the attach-detach controller does not keep | ||||
| // track of them. However, they may still have dangling volumes attached, so we need to scan all the | ||||
| // remaining nodes that were not covered by the earlier check. | ||||
| func (vs *VSphere) BuildMissingVolumeNodeMap(ctx context.Context) { | ||||
| 	nodeNames := vs.nodeManager.GetNodeNames() | ||||
| 	// Segregate nodes according to VC-DC | ||||
| 	dcNodes := make(map[string][]k8stypes.NodeName) | ||||
|  | ||||
| 	for _, nodeName := range nodeNames { | ||||
| 		// if given node is not in node volume map | ||||
| 		if !vs.vsphereVolumeMap.CheckForNode(nodeName) { | ||||
| 			nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) | ||||
| 			if err != nil { | ||||
| 				klog.V(4).Infof("Failed to get node info: %+v. err: %+v", nodeInfo.vm, err) | ||||
| 				continue | ||||
| 			} | ||||
| 			vcDC := nodeInfo.vcServer + nodeInfo.dataCenter.String() | ||||
| 			dcNodes[vcDC] = append(dcNodes[vcDC], nodeName) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	var wg sync.WaitGroup | ||||
|  | ||||
| 	for _, nodeNames := range dcNodes { | ||||
| 		// Start go routines per VC-DC to check disks are attached | ||||
| 		wg.Add(1) | ||||
| 		go func(nodes []k8stypes.NodeName) { | ||||
| 			err := vs.checkNodeDisks(ctx, nodes) | ||||
| 			if err != nil { | ||||
| 				klog.Errorf("Failed to check disk attached for nodes: %+v. err: %+v", nodes, err) | ||||
| 			} | ||||
| 			wg.Done() | ||||
| 		}(nodeNames) | ||||
| 	} | ||||
| 	wg.Wait() | ||||
| } | ||||
|  | ||||
| func (vs *VSphere) checkNodeDisks(ctx context.Context, nodeNames []k8stypes.NodeName) error { | ||||
| 	var vmList []*vclib.VirtualMachine | ||||
| 	var nodeInfo NodeInfo | ||||
| 	var err error | ||||
|  | ||||
| 	for _, nodeName := range nodeNames { | ||||
| 		nodeInfo, err = vs.nodeManager.GetNodeInfo(nodeName) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		vmList = append(vmList, nodeInfo.vm) | ||||
| 	} | ||||
|  | ||||
| 	// Making sure session is valid | ||||
| 	_, err = vs.getVSphereInstanceForServer(nodeInfo.vcServer, ctx) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// If any of the nodes are not present, the property collector query will fail for the entire operation | ||||
| 	vmMoList, err := nodeInfo.dataCenter.GetVMMoList(ctx, vmList, []string{"config.hardware.device", "name", "config.uuid"}) | ||||
| 	if err != nil { | ||||
| 		if vclib.IsManagedObjectNotFoundError(err) { | ||||
| 			klog.V(4).Infof("checkNodeDisks: ManagedObjectNotFound for property collector query for nodes: %+v vms: %+v", nodeNames, vmList) | ||||
| 			// Property Collector Query failed | ||||
| 			// VerifyVolumePaths per VM | ||||
| 			for _, nodeName := range nodeNames { | ||||
| 				nodeInfo, err := vs.nodeManager.GetNodeInfo(nodeName) | ||||
| 				if err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 				devices, err := nodeInfo.vm.VirtualMachine.Device(ctx) | ||||
| 				if err != nil { | ||||
| 					if vclib.IsManagedObjectNotFoundError(err) { | ||||
| 						klog.V(4).Infof("checkNodeDisks: ManagedObjectNotFound for Kubernetes node: %s with vSphere Virtual Machine reference: %v", nodeName, nodeInfo.vm) | ||||
| 						continue | ||||
| 					} | ||||
| 					return err | ||||
| 				} | ||||
| 				klog.V(4).Infof("Verifying Volume Paths by devices for node %s and VM %s", nodeName, nodeInfo.vm) | ||||
| 				vs.vsphereVolumeMap.Add(nodeName, devices) | ||||
| 			} | ||||
| 			return nil | ||||
| 		} | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	vmMoMap := make(map[string]mo.VirtualMachine) | ||||
| 	for _, vmMo := range vmMoList { | ||||
| 		if vmMo.Config == nil { | ||||
| 			klog.Errorf("Config is not available for VM: %q", vmMo.Name) | ||||
| 			continue | ||||
| 		} | ||||
| 		klog.V(9).Infof("vmMoMap vmname: %q vmuuid: %s", vmMo.Name, strings.ToLower(vmMo.Config.Uuid)) | ||||
| 		vmMoMap[strings.ToLower(vmMo.Config.Uuid)] = vmMo | ||||
| 	} | ||||
|  | ||||
| 	klog.V(9).Infof("vmMoMap: +%v", vmMoMap) | ||||
|  | ||||
| 	for _, nodeName := range nodeNames { | ||||
| 		node, err := vs.nodeManager.GetNode(nodeName) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		nodeUUID, err := GetNodeUUID(&node) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Node Discovery failed to get node uuid for node %s with error: %v", node.Name, err) | ||||
| 			return err | ||||
| 		} | ||||
| 		nodeUUID = strings.ToLower(nodeUUID) | ||||
| 		klog.V(9).Infof("Verifying volume for node %s with nodeuuid %q: %v", nodeName, nodeUUID, vmMoMap) | ||||
| 		vmMo := vmMoMap[nodeUUID] | ||||
| 		vmDevices := object.VirtualDeviceList(vmMo.Config.Hardware.Device) | ||||
| 		vs.vsphereVolumeMap.Add(nodeName, vmDevices) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (vs *VSphere) GetNodeNameFromProviderID(providerID string) (string, error) { | ||||
| 	var nodeName string | ||||
| 	nodes, err := vs.nodeManager.GetNodeDetails() | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error while obtaining Kubernetes node details. error: %+v", err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	for _, node := range nodes { | ||||
| 		// ProviderID is UUID for nodes v1.9.3+ | ||||
| 		if node.VMUUID == GetUUIDFromProviderID(providerID) || node.NodeName == providerID { | ||||
| 			nodeName = node.NodeName | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if nodeName == "" { | ||||
| 		msg := fmt.Sprintf("Error while obtaining Kubernetes nodename for providerID %s.", providerID) | ||||
| 		return "", errors.New(msg) | ||||
| 	} | ||||
| 	return nodeName, nil | ||||
| } | ||||
|  | ||||
| func GetUUIDFromProviderID(providerID string) string { | ||||
| 	return strings.TrimPrefix(providerID, ProviderPrefix) | ||||
| } | ||||
|  | ||||
| func IsUUIDSupportedNode(node *v1.Node) (bool, error) { | ||||
| 	newVersion, err := version.ParseSemantic("v1.9.4") | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) | ||||
| 		return false, err | ||||
| 	} | ||||
| 	nodeVersion, err := version.ParseSemantic(node.Status.NodeInfo.KubeletVersion) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to determine whether node %+v is old with error %v", node, err) | ||||
| 		return false, err | ||||
| 	} | ||||
| 	if nodeVersion.LessThan(newVersion) { | ||||
| 		return true, nil | ||||
| 	} | ||||
| 	return false, nil | ||||
| } | ||||
|  | ||||
| func isGuestHardwareVersionDeprecated(vmHardwareversion string) (bool, error) { | ||||
| 	vmHardwareDeprecated := false | ||||
| 	// vmconfig.Version returns vm hardware version as vmx-11, vmx-13, vmx-14, vmx-15 etc. | ||||
| 	// TrimPrefix (rather than Trim) so that only the literal "vmx-" prefix is removed. | ||||
| 	version := strings.TrimPrefix(vmHardwareversion, "vmx-") | ||||
| 	value, err := strconv.ParseInt(version, 0, 64) | ||||
| 	if err != nil { | ||||
| 		return false, fmt.Errorf("failed to parse vm hardware version: %v Err: %v", version, err) | ||||
| 	} | ||||
| 	if value < 15 { | ||||
| 		vmHardwareDeprecated = true | ||||
| 	} | ||||
| 	return vmHardwareDeprecated, nil | ||||
| } | ||||
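
A small sketch of the threshold behaviour, not part of the original file: hardware versions below vmx-15 are reported as deprecated. The version strings are examples only, and the helper exists purely for illustration.

```go
// exampleHardwareVersionCheck probes two example hardware version strings.
func exampleHardwareVersionCheck() {
	old, _ := isGuestHardwareVersionDeprecated("vmx-13")     // old == true, since 13 < 15
	current, _ := isGuestHardwareVersionDeprecated("vmx-15") // current == false
	klog.V(4).Infof("vmx-13 deprecated=%t, vmx-15 deprecated=%t", old, current)
}
```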
|  | ||||
| func GetNodeUUID(node *v1.Node) (string, error) { | ||||
| 	oldNode, err := IsUUIDSupportedNode(node) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to get node UUID for node %+v with error %v", node, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	if oldNode { | ||||
| 		return node.Status.NodeInfo.SystemUUID, nil | ||||
| 	} | ||||
| 	return GetUUIDFromProviderID(node.Spec.ProviderID), nil | ||||
| } | ||||
|  | ||||
| func GetVMUUID() (string, error) { | ||||
| 	uuidFromFile, err := getRawUUID() | ||||
| 	if err != nil { | ||||
| 		return "", fmt.Errorf("error retrieving vm uuid: %s", err) | ||||
| 	} | ||||
| 	// strip leading and trailing whitespace, including the trailing newline | ||||
| 	uuid := strings.TrimSpace(uuidFromFile) | ||||
| 	// check the uuid starts with "VMware-" | ||||
| 	if !strings.HasPrefix(uuid, UUIDPrefix) { | ||||
| 		return "", fmt.Errorf("failed to match Prefix, UUID read from the file is %v", uuidFromFile) | ||||
| 	} | ||||
| 	// Strip the prefix and white spaces and - | ||||
| 	uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1) | ||||
| 	uuid = strings.Replace(uuid, "-", "", -1) | ||||
| 	if len(uuid) != 32 { | ||||
| 		return "", fmt.Errorf("length check failed, UUID read from the file is %v", uuidFromFile) | ||||
| 	} | ||||
| 	// need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f" | ||||
| 	uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]) | ||||
| 	return uuid, nil | ||||
| } | ||||
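To illustrate the normalization above, assume the guest reports a serial of the form "VMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f" (an illustrative value). Stripping the prefix, spaces, and dashes and then re-inserting dashes in the 8-4-4-4-12 layout yields the canonical UUID shown in the comment above. A standalone sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	raw := "VMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f" // illustrative value only
	uuid := strings.TrimPrefix(strings.TrimSpace(raw), "VMware-")
	// drop spaces and dashes, leaving 32 hex characters
	uuid = strings.NewReplacer(" ", "", "-", "").Replace(uuid)
	// re-insert dashes in the 8-4-4-4-12 layout
	uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32])
	fmt.Println(uuid) // 564d395e-d807-e18a-cb25-b79f65eb2b9f
}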
|  | ||||
| // GetWorkspaceDatacenters returns the Datacenter objects that VCP has access to. | ||||
| // Users can configure the list of datacenters in vsphere.conf. Otherwise, all the | ||||
| // Datacenters in the configured list of VCs are returned. | ||||
| func (vs *VSphere) GetWorkspaceDatacenters(ctx context.Context) ([]*vclib.Datacenter, error) { | ||||
| 	var datacenterObjs []*vclib.Datacenter | ||||
| 	for vc, vsi := range vs.vsphereInstanceMap { | ||||
| 		// ensure connection to VC | ||||
| 		err := vs.nodeManager.vcConnect(ctx, vsi) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		if vsi.cfg.Datacenters == "" { | ||||
| 			vcDatacenterObjs, err := vclib.GetAllDatacenter(ctx, vsi.conn) | ||||
| 			if err != nil { | ||||
| 				klog.Errorf("Error fetching list of datacenters from VC %s: %+v", vc, err) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			datacenterObjs = append(datacenterObjs, vcDatacenterObjs...) | ||||
| 		} else { | ||||
| 			datacenters := strings.Split(vsi.cfg.Datacenters, ",") | ||||
| 			for _, dc := range datacenters { | ||||
| 				dc = strings.TrimSpace(dc) | ||||
| 				if dc == "" { | ||||
| 					continue | ||||
| 				} | ||||
| 				datacenterObj, err := vclib.GetDatacenter(ctx, vsi.conn, dc) | ||||
| 				if err != nil { | ||||
| 					klog.Errorf("Error fetching datacenter %s from VC %s: %+v", dc, vc, err) | ||||
| 					return nil, err | ||||
| 				} | ||||
| 				datacenterObjs = append(datacenterObjs, datacenterObj) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return datacenterObjs, nil | ||||
| } | ||||
|  | ||||
| // FindDatastoreByName looks for the given datastore by name across all available datacenters. | ||||
| // If more than one Datacenter has a Datastore with the given name, references to all of them are returned. | ||||
| func (vs *VSphere) FindDatastoreByName(ctx context.Context, datastoreName string) ([]*vclib.DatastoreInfo, error) { | ||||
| 	datacenters, err := vs.GetWorkspaceDatacenters(ctx) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	var datastoreInfos []*vclib.DatastoreInfo | ||||
| 	for _, dc := range datacenters { | ||||
| 		datastoreInfo, err := dc.GetDatastoreInfoByName(ctx, datastoreName) | ||||
| 		if err != nil { | ||||
| 			klog.V(9).Infof("Did not find datastore %s in datacenter %s, still looking.", datastoreName, dc.Name()) | ||||
| 			continue | ||||
| 		} | ||||
| 		datastoreInfos = append(datastoreInfos, datastoreInfo) | ||||
| 	} | ||||
| 	if len(datastoreInfos) == 0 { | ||||
| 		return nil, fmt.Errorf("Datastore '%s' not found", datastoreName) | ||||
| 	} | ||||
| 	klog.V(4).Infof("Found datastore infos %v for datastore %s", datastoreInfos, datastoreName) | ||||
| 	return datastoreInfos, nil | ||||
| } | ||||
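A hedged usage sketch for the lookup above, assuming an initialized *VSphere value vs and a context ctx; because the same datastore name may exist in more than one datacenter, callers have to handle the ambiguous case (the datastore name is a placeholder):

	infos, err := vs.FindDatastoreByName(ctx, "sharedVmfs-0")
	if err != nil {
		klog.Errorf("datastore lookup failed: %v", err)
		return
	}
	if len(infos) > 1 {
		klog.Warningf("datastore name matches %d datacenters; caller must disambiguate", len(infos))
	}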
| @@ -1,34 +0,0 @@ | ||||
| //go:build !providerless && linux | ||||
| // +build !providerless,linux | ||||
|  | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"io/ioutil" | ||||
| ) | ||||
|  | ||||
| const UUIDPath = "/sys/class/dmi/id/product_serial" | ||||
|  | ||||
| func getRawUUID() (string, error) { | ||||
| 	id, err := ioutil.ReadFile(UUIDPath) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return string(id), nil | ||||
| } | ||||
| @@ -1,135 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2019 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"testing" | ||||
|  | ||||
| 	"k8s.io/legacy-cloud-providers/vsphere/vclib" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/simulator" | ||||
| ) | ||||
|  | ||||
| func TestGetPathFromFileNotFound(t *testing.T) { | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	// vCenter model + initial set of objects (cluster, hosts, VMs, network, datastore, etc) | ||||
| 	model := simulator.VPX() | ||||
|  | ||||
| 	defer model.Remove() | ||||
| 	err := model.Create() | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	s := model.Service.NewServer() | ||||
| 	defer s.Close() | ||||
|  | ||||
| 	c, err := govmomi.NewClient(ctx, s.URL, true) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	vc := &vclib.VSphereConnection{Client: c.Client} | ||||
|  | ||||
| 	dc, err := vclib.GetDatacenter(ctx, vc, vclib.TestDefaultDatacenter) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("failed to get datacenter: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	requestDiskPath := fmt.Sprintf("[%s] %s", vclib.TestDefaultDatastore, DummyDiskName) | ||||
| 	_, err = dc.GetVirtualDiskPage83Data(ctx, requestDiskPath) | ||||
| 	if err == nil { | ||||
| 		t.Error("expected error when calling GetVirtualDiskPage83Data") | ||||
| 	} | ||||
|  | ||||
| 	_, err = getPathFromFileNotFound(err) | ||||
| 	if err != nil { | ||||
| 		t.Errorf("expected err to be nil but was %v", err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = getPathFromFileNotFound(nil) | ||||
| 	if err == nil { | ||||
| 		t.Errorf("expected err when calling getPathFromFileNotFound with nil err") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestVMX15Deprecated(t *testing.T) { | ||||
| 	vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated("vmx-15") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if vmhardwaredeprecated { | ||||
| 		t.Fatal("vmx-15 should not be deprecated") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestVMX14Deprecated(t *testing.T) { | ||||
| 	vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated("vmx-14") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if !vmhardwaredeprecated { | ||||
| 		t.Fatal("vmx-14 should be deprecated") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestVMX13Deprecated(t *testing.T) { | ||||
| 	vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated("vmx-13") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if !vmhardwaredeprecated { | ||||
| 		t.Fatal("vmx-13 should be deprecated") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestVMX11Deprecated(t *testing.T) { | ||||
| 	vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated("vmx-11") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if !vmhardwaredeprecated { | ||||
| 		t.Fatal("vmx-11 should be deprecated") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestVMX17Deprecated(t *testing.T) { | ||||
| 	vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated("vmx-17") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if vmhardwaredeprecated { | ||||
| 		t.Fatal("vmx-17 should not be deprecated") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func TestVMX18Deprecated(t *testing.T) { | ||||
| 	vmhardwaredeprecated, err := isGuestHardwareVersionDeprecated("vmx-18") | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	if vmhardwaredeprecated { | ||||
| 		t.Fatal("vmx-18 should not be deprecated") | ||||
| 	} | ||||
| } | ||||
| @@ -1,26 +0,0 @@ | ||||
| //go:build !providerless && !windows && !linux | ||||
| // +build !providerless,!windows,!linux | ||||
|  | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| func getRawUUID() (string, error) { | ||||
| 	return "", fmt.Errorf("retrieving VM UUID on this build is not implemented") | ||||
| } | ||||
| @@ -1,45 +0,0 @@ | ||||
| //go:build !providerless && windows | ||||
| // +build !providerless,windows | ||||
|  | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os/exec" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| func getRawUUID() (string, error) { | ||||
| 	result, err := exec.Command("wmic", "bios", "get", "serialnumber").Output() | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	lines := strings.FieldsFunc(string(result), func(r rune) bool { | ||||
| 		switch r { | ||||
| 		case '\n', '\r': | ||||
| 			return true | ||||
| 		default: | ||||
| 			return false | ||||
| 		} | ||||
| 	}) | ||||
| 	if len(lines) != 2 { | ||||
| 		return "", fmt.Errorf("received unexpected value retrieving vm uuid: %q", string(result)) | ||||
| 	} | ||||
| 	return lines[1], nil | ||||
| } | ||||
| @@ -1,114 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2020 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	k8stypes "k8s.io/apimachinery/pkg/types" | ||||
| ) | ||||
|  | ||||
| type volumePath string | ||||
|  | ||||
| type nodeVolumeStatus struct { | ||||
| 	nodeName k8stypes.NodeName | ||||
| 	verified bool | ||||
| } | ||||
|  | ||||
| // VsphereVolumeMap stores last known state of node and volume mapping | ||||
| type VsphereVolumeMap struct { | ||||
| 	volumeNodeMap map[volumePath]nodeVolumeStatus | ||||
| 	nodeMap       map[k8stypes.NodeName]bool | ||||
| 	lock          sync.RWMutex | ||||
| } | ||||
|  | ||||
| func NewVsphereVolumeMap() *VsphereVolumeMap { | ||||
| 	return &VsphereVolumeMap{ | ||||
| 		volumeNodeMap: map[volumePath]nodeVolumeStatus{}, | ||||
| 		nodeMap:       map[k8stypes.NodeName]bool{}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // StartDiskVerification marks all known volumes as unverified so that | ||||
| // disks which aren't verified can be removed at the end of the verification process | ||||
| func (vsphereVolume *VsphereVolumeMap) StartDiskVerification() { | ||||
| 	vsphereVolume.lock.Lock() | ||||
| 	defer vsphereVolume.lock.Unlock() | ||||
| 	for k, v := range vsphereVolume.volumeNodeMap { | ||||
| 		v.verified = false | ||||
| 		vsphereVolume.volumeNodeMap[k] = v | ||||
| 	} | ||||
| 	// reset nodeMap to empty so that any node we could not verify via the usual verification process | ||||
| 	// can still be verified. | ||||
| 	vsphereVolume.nodeMap = map[k8stypes.NodeName]bool{} | ||||
| } | ||||
|  | ||||
| // CheckForVolume verifies if disk is attached to some node in the cluster. | ||||
| // This check is not definitive and should be followed up by separate verification. | ||||
| func (vsphereVolume *VsphereVolumeMap) CheckForVolume(path string) (k8stypes.NodeName, bool) { | ||||
| 	vsphereVolume.lock.RLock() | ||||
| 	defer vsphereVolume.lock.RUnlock() | ||||
| 	vPath := volumePath(path) | ||||
| 	ns, ok := vsphereVolume.volumeNodeMap[vPath] | ||||
| 	if ok { | ||||
| 		return ns.nodeName, true | ||||
| 	} | ||||
| 	return "", false | ||||
| } | ||||
|  | ||||
| // CheckForNode returns true if given node has already been processed by volume | ||||
| // verification mechanism. This is used to skip verifying attached disks on nodes | ||||
| // which were previously verified. | ||||
| func (vsphereVolume *VsphereVolumeMap) CheckForNode(nodeName k8stypes.NodeName) bool { | ||||
| 	vsphereVolume.lock.RLock() | ||||
| 	defer vsphereVolume.lock.RUnlock() | ||||
| 	_, ok := vsphereVolume.nodeMap[nodeName] | ||||
| 	return ok | ||||
| } | ||||
|  | ||||
| // Add all devices found on a node to the device map | ||||
| func (vsphereVolume *VsphereVolumeMap) Add(node k8stypes.NodeName, vmDevices object.VirtualDeviceList) { | ||||
| 	vsphereVolume.lock.Lock() | ||||
| 	defer vsphereVolume.lock.Unlock() | ||||
| 	for _, device := range vmDevices { | ||||
| 		if vmDevices.TypeName(device) == "VirtualDisk" { | ||||
| 			virtualDevice := device.GetVirtualDevice() | ||||
| 			if backing, ok := virtualDevice.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { | ||||
| 				filename := volumePath(backing.FileName) | ||||
| 				vsphereVolume.volumeNodeMap[filename] = nodeVolumeStatus{node, true} | ||||
| 				vsphereVolume.nodeMap[node] = true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RemoveUnverified will remove any device which we could not verify to be attached to a node. | ||||
| func (vsphereVolume *VsphereVolumeMap) RemoveUnverified() { | ||||
| 	vsphereVolume.lock.Lock() | ||||
| 	defer vsphereVolume.lock.Unlock() | ||||
| 	for k, v := range vsphereVolume.volumeNodeMap { | ||||
| 		if !v.verified { | ||||
| 			delete(vsphereVolume.volumeNodeMap, k) | ||||
| 			delete(vsphereVolume.nodeMap, v.nodeName) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
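Per the comments above, the intended lifecycle of the map is: mark everything unverified, re-add what is currently observed on each VM, then drop whatever was not seen. A hedged sketch of that sequence (vmDevicesByNode is a hypothetical source of per-node device lists):

	volumeMap := NewVsphereVolumeMap()
	volumeMap.StartDiskVerification()
	for nodeName, devices := range vmDevicesByNode {
		if volumeMap.CheckForNode(nodeName) {
			continue // this node was already verified in the current pass
		}
		volumeMap.Add(nodeName, devices)
	}
	volumeMap.RemoveUnverified()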
| @@ -1,117 +0,0 @@ | ||||
| //go:build !providerless | ||||
| // +build !providerless | ||||
|  | ||||
| /* | ||||
| Copyright 2020 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"testing" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	k8stypes "k8s.io/apimachinery/pkg/types" | ||||
| ) | ||||
|  | ||||
| func TestVsphereVolumeMap(t *testing.T) { | ||||
| 	tests := []struct { | ||||
| 		name        string | ||||
| 		deviceToAdd object.VirtualDeviceList | ||||
| 		nodeToAdd   k8stypes.NodeName | ||||
| 		checkRunner func(volumeMap *VsphereVolumeMap) | ||||
| 	}{ | ||||
| 		{ | ||||
| 			name:        "adding new volume", | ||||
| 			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"), | ||||
| 			nodeToAdd:   convertToK8sType("node1.lan"), | ||||
| 			checkRunner: func(volumeMap *VsphereVolumeMap) { | ||||
| 				volumeToCheck := "[foobar] kubevols/foo.vmdk" | ||||
| 				_, ok := volumeMap.CheckForVolume(volumeToCheck) | ||||
| 				if !ok { | ||||
| 					t.Errorf("error checking volume %s, expected true got %v", volumeToCheck, ok) | ||||
| 				} | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "mismatching volume", | ||||
| 			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"), | ||||
| 			nodeToAdd:   convertToK8sType("node1.lan"), | ||||
| 			checkRunner: func(volumeMap *VsphereVolumeMap) { | ||||
| 				volumeToCheck := "[foobar] kubevols/bar.vmdk" | ||||
| 				_, ok := volumeMap.CheckForVolume(volumeToCheck) | ||||
| 				if ok { | ||||
| 					t.Errorf("error checking volume %s, expected false got %v", volumeToCheck, ok) | ||||
| 				} | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "should remove unverified devices", | ||||
| 			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"), | ||||
| 			nodeToAdd:   convertToK8sType("node1.lan"), | ||||
| 			checkRunner: func(volumeMap *VsphereVolumeMap) { | ||||
| 				volumeMap.StartDiskVerification() | ||||
| 				volumeMap.RemoveUnverified() | ||||
| 				volumeToCheck := "[foobar] kubevols/foo.vmdk" | ||||
| 				_, ok := volumeMap.CheckForVolume(volumeToCheck) | ||||
| 				if ok { | ||||
| 					t.Errorf("error checking volume %s, expected false got %v", volumeToCheck, ok) | ||||
| 				} | ||||
| 				node := k8stypes.NodeName("node1.lan") | ||||
| 				ok = volumeMap.CheckForNode(node) | ||||
| 				if ok { | ||||
| 					t.Errorf("unexpected node %s in node map", node) | ||||
| 				} | ||||
| 			}, | ||||
| 		}, | ||||
| 		{ | ||||
| 			name:        "node check should return false for previously added node", | ||||
| 			deviceToAdd: getVirtualDeviceList("[foobar] kubevols/foo.vmdk"), | ||||
| 			nodeToAdd:   convertToK8sType("node1.lan"), | ||||
| 			checkRunner: func(volumeMap *VsphereVolumeMap) { | ||||
| 				volumeMap.StartDiskVerification() | ||||
| 				node := k8stypes.NodeName("node1.lan") | ||||
| 				ok := volumeMap.CheckForNode(node) | ||||
| 				if ok { | ||||
| 					t.Errorf("unexpected node %s in node map", node) | ||||
| 				} | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	for _, tc := range tests { | ||||
| 		t.Run(tc.name, func(t *testing.T) { | ||||
| 			vMap := NewVsphereVolumeMap() | ||||
| 			vMap.Add(tc.nodeToAdd, tc.deviceToAdd) | ||||
| 			tc.checkRunner(vMap) | ||||
| 		}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func getVirtualDeviceList(vPath string) object.VirtualDeviceList { | ||||
| 	return object.VirtualDeviceList{ | ||||
| 		&types.VirtualDisk{ | ||||
| 			VirtualDevice: types.VirtualDevice{ | ||||
| 				Key: 1000, | ||||
| 				Backing: &types.VirtualDiskFlatVer2BackingInfo{ | ||||
| 					VirtualDeviceFileBackingInfo: types.VirtualDeviceFileBackingInfo{ | ||||
| 						FileName: vPath, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
| @@ -61,7 +61,6 @@ import ( | ||||
| 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" | ||||
| 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
| ) | ||||
|  | ||||
| @@ -1185,7 +1184,6 @@ type vSphereDriver struct { | ||||
|  | ||||
| type vSphereVolume struct { | ||||
| 	volumePath string | ||||
| 	nodeInfo   *vspheretest.NodeInfo | ||||
| } | ||||
|  | ||||
| var _ storageframework.TestDriver = &vSphereDriver{} | ||||
| @@ -1286,17 +1284,6 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(ctx context.Context, con | ||||
| } | ||||
|  | ||||
| func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig { | ||||
| 	vspheretest.Bootstrap(f) | ||||
| 	ginkgo.DeferCleanup(func(ctx context.Context) { | ||||
| 		// Driver Cleanup function | ||||
| 		// Logout each vSphere client connection to prevent session leakage | ||||
| 		nodes := vspheretest.GetReadySchedulableNodeInfos(ctx, f.ClientSet) | ||||
| 		for _, node := range nodes { | ||||
| 			if node.VSphere.Client != nil { | ||||
| 				_ = node.VSphere.Client.Logout(ctx) | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| 	return &storageframework.PerTestConfig{ | ||||
| 		Driver:    v, | ||||
| 		Prefix:    "vsphere", | ||||
| @@ -1305,18 +1292,10 @@ func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework) | ||||
| } | ||||
|  | ||||
| func (v *vSphereDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { | ||||
| 	f := config.Framework | ||||
| 	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo(ctx, f.ClientSet) | ||||
| 	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	return &vSphereVolume{ | ||||
| 		volumePath: volumePath, | ||||
| 		nodeInfo:   nodeInfo, | ||||
| 	} | ||||
| 	return &vSphereVolume{} | ||||
| } | ||||
|  | ||||
| func (v *vSphereVolume) DeleteVolume(ctx context.Context) { | ||||
| 	v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef) | ||||
| } | ||||
|  | ||||
| // Azure Disk | ||||
|   | ||||
| @@ -1,66 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
|  | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| ) | ||||
|  | ||||
| var once sync.Once | ||||
| var waiting = make(chan bool) | ||||
|  | ||||
| // Bootstrap takes care of initializing necessary test context for vSphere tests | ||||
| func Bootstrap(fw *framework.Framework) { | ||||
| 	done := make(chan bool) | ||||
| 	go func() { | ||||
| 		once.Do(func() { | ||||
| 			bootstrapOnce(fw) | ||||
| 		}) | ||||
| 		<-waiting | ||||
| 		done <- true | ||||
| 	}() | ||||
| 	<-done | ||||
| } | ||||
|  | ||||
| func bootstrapOnce(f *framework.Framework) { | ||||
| 	// 1. Read vSphere conf and get VSphere instances | ||||
| 	vsphereInstances, err := GetVSphereInstances() | ||||
| 	if err != nil { | ||||
| 		framework.Failf("Failed to bootstrap vSphere with error: %v", err) | ||||
| 	} | ||||
| 	// 2. Get all nodes | ||||
| 	nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		framework.Failf("Failed to get nodes: %v", err) | ||||
| 	} | ||||
| 	TestContext = Context{NodeMapper: NewNodeMapper(), VSphereInstances: vsphereInstances} | ||||
| 	// 3. Get Node to VSphere mapping | ||||
| 	err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList) | ||||
| 	if err != nil { | ||||
| 		framework.Failf("Failed to bootstrap vSphere with error: %v", err) | ||||
| 	} | ||||
| 	// 4. Generate Zone to Datastore mapping | ||||
| 	err = TestContext.NodeMapper.GenerateZoneToDatastoreMap() | ||||
| 	if err != nil { | ||||
| 		framework.Failf("Failed to generate zone to datastore mapping with error: %v", err) | ||||
| 	} | ||||
| 	close(waiting) | ||||
| } | ||||
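Callers are expected to invoke Bootstrap from a Ginkgo BeforeEach, as the persistent-volume tests later in this change do, and then read the shared TestContext. A hedged sketch:

	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f) // safe to call from every spec; the heavy lifting runs only once
		for vc := range TestContext.VSphereInstances {
			framework.Logf("bootstrapped vCenter %s", vc)
		}
	})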
| @@ -1,189 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"os" | ||||
|  | ||||
| 	"gopkg.in/gcfg.v1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	vSphereConfFileEnvVar = "VSPHERE_CONF_FILE" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	confFileLocation = os.Getenv(vSphereConfFileEnvVar) | ||||
| ) | ||||
|  | ||||
| // Config represents vSphere configuration | ||||
| type Config struct { | ||||
| 	Username          string | ||||
| 	Password          string | ||||
| 	Hostname          string | ||||
| 	Port              string | ||||
| 	Datacenters       string | ||||
| 	RoundTripperCount uint | ||||
| 	DefaultDatastore  string | ||||
| 	Folder            string | ||||
| } | ||||
|  | ||||
| // ConfigFile represents the content of vsphere.conf file. | ||||
| // Users specify the configuration of one or more vSphere instances in vsphere.conf where | ||||
| // the Kubernetes master and worker nodes are running. | ||||
| type ConfigFile struct { | ||||
| 	Global struct { | ||||
| 		// vCenter username. | ||||
| 		User string `gcfg:"user"` | ||||
| 		// vCenter password in clear text. | ||||
| 		Password string `gcfg:"password"` | ||||
| 		// vCenter port. | ||||
| 		VCenterPort string `gcfg:"port"` | ||||
| 		// True if vCenter uses self-signed cert. | ||||
| 		InsecureFlag bool `gcfg:"insecure-flag"` | ||||
| 		// Datacenter in which VMs are located. | ||||
| 		Datacenters string `gcfg:"datacenters"` | ||||
| 		// Soap round tripper count (retries = RoundTripper - 1) | ||||
| 		RoundTripperCount uint `gcfg:"soap-roundtrip-count"` | ||||
| 	} | ||||
|  | ||||
| 	VirtualCenter map[string]*Config | ||||
|  | ||||
| 	Network struct { | ||||
| 		// PublicNetwork is name of the network the VMs are joined to. | ||||
| 		PublicNetwork string `gcfg:"public-network"` | ||||
| 	} | ||||
|  | ||||
| 	Disk struct { | ||||
| 		// SCSIControllerType defines SCSI controller to be used. | ||||
| 		SCSIControllerType string `gcfg:"scsicontrollertype"` | ||||
| 	} | ||||
|  | ||||
| 	// Endpoint used to create volumes | ||||
| 	Workspace struct { | ||||
| 		VCenterIP        string `gcfg:"server"` | ||||
| 		Datacenter       string `gcfg:"datacenter"` | ||||
| 		Folder           string `gcfg:"folder"` | ||||
| 		DefaultDatastore string `gcfg:"default-datastore"` | ||||
| 		ResourcePoolPath string `gcfg:"resourcepool-path"` | ||||
| 	} | ||||
| 	// Tag categories and tags which correspond to "built-in node labels: zones and region" | ||||
| 	Labels struct { | ||||
| 		Zone   string `gcfg:"zone"` | ||||
| 		Region string `gcfg:"region"` | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GetVSphereInstances parses vsphere.conf and returns VSphere instances | ||||
| func GetVSphereInstances() (map[string]*VSphere, error) { | ||||
| 	cfg, err := getConfig() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return populateInstanceMap(cfg) | ||||
| } | ||||
|  | ||||
| func getConfig() (*ConfigFile, error) { | ||||
| 	if confFileLocation == "" { | ||||
| 		if framework.TestContext.CloudConfig.ConfigFile == "" { | ||||
| 			return nil, fmt.Errorf("env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified") | ||||
| 		} | ||||
| 		confFileLocation = framework.TestContext.CloudConfig.ConfigFile | ||||
| 	} | ||||
| 	confFile, err := os.Open(confFileLocation) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer confFile.Close() | ||||
| 	cfg, err := readConfig(confFile) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return &cfg, nil | ||||
| } | ||||
|  | ||||
| // readConfig parses vSphere cloud config file into ConfigFile. | ||||
| func readConfig(config io.Reader) (ConfigFile, error) { | ||||
| 	if config == nil { | ||||
| 		err := fmt.Errorf("no vSphere cloud provider config file given") | ||||
| 		return ConfigFile{}, err | ||||
| 	} | ||||
|  | ||||
| 	var cfg ConfigFile | ||||
| 	err := gcfg.ReadInto(&cfg, config) | ||||
| 	return cfg, err | ||||
| } | ||||
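For reference, a hedged example of the input readConfig consumes: gcfg's INI-style syntax with illustrative, non-real values. The same string can be parsed directly with gcfg.ReadStringInto:

	sample := `
[Global]
user = "administrator@vsphere.local"
password = "not-a-real-password"
port = "443"
insecure-flag = true
datacenters = "dc-1"

[VirtualCenter "vc-1.example.com"]

[Workspace]
server = "vc-1.example.com"
datacenter = "dc-1"
folder = "kubernetes"
default-datastore = "sharedVmfs-0"
`
	var cfg ConfigFile
	if err := gcfg.ReadStringInto(&cfg, sample); err != nil {
		framework.Failf("failed to parse sample config: %v", err)
	}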
|  | ||||
| func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) { | ||||
| 	vsphereInstances := make(map[string]*VSphere) | ||||
|  | ||||
| 	if cfg.Workspace.VCenterIP == "" || cfg.Workspace.DefaultDatastore == "" || cfg.Workspace.Folder == "" || cfg.Workspace.Datacenter == "" { | ||||
| 		msg := fmt.Sprintf("All fields in workspace are mandatory."+ | ||||
| 			" vsphere.conf does not have the workspace specified correctly. cfg.Workspace: %+v", cfg.Workspace) | ||||
| 		framework.Logf(msg) | ||||
| 		return nil, errors.New(msg) | ||||
| 	} | ||||
| 	for vcServer, vcConfig := range cfg.VirtualCenter { | ||||
| 		framework.Logf("Initializing vc server %s", vcServer) | ||||
| 		if vcServer == "" { | ||||
| 			framework.Logf("vsphere.conf does not have the VirtualCenter IP address specified") | ||||
| 			return nil, errors.New("vsphere.conf does not have the VirtualCenter IP address specified") | ||||
| 		} | ||||
| 		vcConfig.Hostname = vcServer | ||||
|  | ||||
| 		if vcConfig.Username == "" { | ||||
| 			vcConfig.Username = cfg.Global.User | ||||
| 		} | ||||
| 		if vcConfig.Password == "" { | ||||
| 			vcConfig.Password = cfg.Global.Password | ||||
| 		} | ||||
| 		if vcConfig.Username == "" { | ||||
| 			msg := fmt.Sprintf("vcConfig.Username is empty for vc %s!", vcServer) | ||||
| 			framework.Logf(msg) | ||||
| 			return nil, errors.New(msg) | ||||
| 		} | ||||
| 		if vcConfig.Password == "" { | ||||
| 			msg := fmt.Sprintf("vcConfig.Password is empty for vc %s!", vcServer) | ||||
| 			framework.Logf(msg) | ||||
| 			return nil, errors.New(msg) | ||||
| 		} | ||||
| 		if vcConfig.Port == "" { | ||||
| 			vcConfig.Port = cfg.Global.VCenterPort | ||||
| 		} | ||||
| 		if vcConfig.Datacenters == "" && cfg.Global.Datacenters != "" { | ||||
| 			vcConfig.Datacenters = cfg.Global.Datacenters | ||||
| 		} | ||||
| 		if vcConfig.RoundTripperCount == 0 { | ||||
| 			vcConfig.RoundTripperCount = cfg.Global.RoundTripperCount | ||||
| 		} | ||||
|  | ||||
| 		vcConfig.DefaultDatastore = cfg.Workspace.DefaultDatastore | ||||
| 		vcConfig.Folder = cfg.Workspace.Folder | ||||
|  | ||||
| 		vsphereIns := VSphere{ | ||||
| 			Config: vcConfig, | ||||
| 		} | ||||
| 		vsphereInstances[vcServer] = &vsphereIns | ||||
| 	} | ||||
|  | ||||
| 	framework.Logf("vSphere instances: %v", vsphereInstances) | ||||
| 	return vsphereInstances, nil | ||||
| } | ||||
| @@ -1,92 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	neturl "net/url" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/session" | ||||
| 	"github.com/vmware/govmomi/vim25" | ||||
| 	"k8s.io/klog/v2" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	roundTripperDefaultCount = 3 | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	clientLock sync.Mutex | ||||
| ) | ||||
|  | ||||
| // Connect makes connection to vSphere | ||||
| // No action is taken if a connection already exists and is alive. Otherwise, a new client will be created. | ||||
| func Connect(ctx context.Context, vs *VSphere) error { | ||||
| 	var err error | ||||
| 	clientLock.Lock() | ||||
| 	defer clientLock.Unlock() | ||||
|  | ||||
| 	if vs.Client == nil { | ||||
| 		vs.Client, err = NewClient(ctx, vs) | ||||
| 		if err != nil { | ||||
| 			klog.Errorf("Failed to create govmomi client. err: %+v", err) | ||||
| 			return err | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	manager := session.NewManager(vs.Client.Client) | ||||
| 	userSession, err := manager.UserSession(ctx) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Error while obtaining user session. err: %+v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	if userSession != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	klog.Warningf("Creating new client session since the existing session is not valid or not authenticated") | ||||
| 	vs.Client.Logout(ctx) | ||||
| 	vs.Client, err = NewClient(ctx, vs) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create govmomi client. err: %+v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // NewClient creates a new client for vSphere connection | ||||
| func NewClient(ctx context.Context, vs *VSphere) (*govmomi.Client, error) { | ||||
| 	url, err := neturl.Parse(fmt.Sprintf("https://%s/sdk", net.JoinHostPort(vs.Config.Hostname, vs.Config.Port))) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to parse URL: %s. err: %+v", url, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	url.User = neturl.UserPassword(vs.Config.Username, vs.Config.Password) | ||||
| 	client, err := govmomi.NewClient(ctx, url, true) | ||||
| 	if err != nil { | ||||
| 		klog.Errorf("Failed to create new client. err: %+v", err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if vs.Config.RoundTripperCount == 0 { | ||||
| 		vs.Config.RoundTripperCount = roundTripperDefaultCount | ||||
| 	} | ||||
| 	client.RoundTripper = vim25.Retry(client.RoundTripper, vim25.TemporaryNetworkError(int(vs.Config.RoundTripperCount))) | ||||
| 	return client, nil | ||||
| } | ||||
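A hedged usage sketch: test helpers call Connect before talking to a vCenter so that a stale or logged-out session is transparently replaced by the session check above (vs is assumed to come from the instance map):

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if err := Connect(ctx, vs); err != nil {
		framework.Failf("failed to connect to vCenter %s: %v", vs.Config.Hostname, err)
	}
	// vs.Client is now a live govmomi client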
| @@ -1,26 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| // Context holds common information for vSphere tests | ||||
| type Context struct { | ||||
| 	NodeMapper       *NodeMapper | ||||
| 	VSphereInstances map[string]*VSphere | ||||
| } | ||||
|  | ||||
| // TestContext should be used by all tests to access common context data. It should be initialized only once, during bootstrapping the tests. | ||||
| var TestContext Context | ||||
| @@ -1,304 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vapi/rest" | ||||
| 	"github.com/vmware/govmomi/vapi/tags" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
|  | ||||
| 	neturl "net/url" | ||||
| ) | ||||
|  | ||||
| // NodeMapper contains information to generate nameToNodeInfo and vcToZoneDatastores maps | ||||
| type NodeMapper struct { | ||||
| 	nodeInfoRWLock        *sync.RWMutex | ||||
| 	nameToNodeInfo        map[string]*NodeInfo | ||||
| 	vcToZoneDatastoresMap map[string](map[string][]string) | ||||
| } | ||||
|  | ||||
| // NodeInfo contains information about vcenter nodes | ||||
| type NodeInfo struct { | ||||
| 	Name              string | ||||
| 	DataCenterRef     types.ManagedObjectReference | ||||
| 	VirtualMachineRef types.ManagedObjectReference | ||||
| 	HostSystemRef     types.ManagedObjectReference | ||||
| 	VSphere           *VSphere | ||||
| 	Zones             []string | ||||
| } | ||||
|  | ||||
| const ( | ||||
| 	datacenterType             = "Datacenter" | ||||
| 	clusterComputeResourceType = "ClusterComputeResource" | ||||
| 	hostSystemType             = "HostSystem" | ||||
| ) | ||||
|  | ||||
| // NewNodeMapper returns a new NodeMapper | ||||
| func NewNodeMapper() *NodeMapper { | ||||
| 	return &NodeMapper{ | ||||
| 		nodeInfoRWLock:        &sync.RWMutex{}, | ||||
| 		nameToNodeInfo:        make(map[string]*NodeInfo), | ||||
| 		vcToZoneDatastoresMap: make(map[string](map[string][]string)), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GenerateNodeMap populates node name to node info map | ||||
| func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error { | ||||
| 	type VMSearch struct { | ||||
| 		vs         *VSphere | ||||
| 		datacenter *object.Datacenter | ||||
| 	} | ||||
|  | ||||
| 	var wg sync.WaitGroup | ||||
| 	var queueChannel []*VMSearch | ||||
|  | ||||
| 	var datacenters []*object.Datacenter | ||||
| 	var err error | ||||
| 	for _, vs := range vSphereInstances { | ||||
|  | ||||
| 		// Create context | ||||
| 		ctx, cancel := context.WithCancel(context.Background()) | ||||
| 		defer cancel() | ||||
|  | ||||
| 		if vs.Config.Datacenters == "" { | ||||
| 			datacenters, err = vs.GetAllDatacenter(ctx) | ||||
| 			if err != nil { | ||||
| 				framework.Logf("NodeMapper error: %v", err) | ||||
| 				continue | ||||
| 			} | ||||
| 		} else { | ||||
| 			dcName := strings.Split(vs.Config.Datacenters, ",") | ||||
| 			for _, dc := range dcName { | ||||
| 				dc = strings.TrimSpace(dc) | ||||
| 				if dc == "" { | ||||
| 					continue | ||||
| 				} | ||||
| 				datacenter, err := vs.GetDatacenter(ctx, dc) | ||||
| 				if err != nil { | ||||
| 					framework.Logf("NodeMapper error dc: %s \n err: %v", dc, err) | ||||
|  | ||||
| 					continue | ||||
| 				} | ||||
| 				datacenters = append(datacenters, datacenter) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for _, dc := range datacenters { | ||||
| 			framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name()) | ||||
| 			queueChannel = append(queueChannel, &VMSearch{vs: vs, datacenter: dc}) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, node := range nodeList.Items { | ||||
| 		n := node | ||||
| 		wg.Add(1) | ||||
| 		go func() { | ||||
| 			nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID) | ||||
| 			framework.Logf("Searching for node with UUID: %s", nodeUUID) | ||||
| 			for _, res := range queueChannel { | ||||
| 				ctx, cancel := context.WithCancel(context.Background()) | ||||
| 				defer cancel() | ||||
| 				vm, err := res.vs.GetVMByUUID(ctx, nodeUUID, res.datacenter) | ||||
| 				if err != nil { | ||||
| 					framework.Logf("Error %v while looking for node=%s in vc=%s and datacenter=%s", | ||||
| 						err, n.Name, res.vs.Config.Hostname, res.datacenter.Name()) | ||||
| 					continue | ||||
| 				} | ||||
| 				if vm != nil { | ||||
| 					hostSystemRef := res.vs.GetHostFromVMReference(ctx, vm.Reference()) | ||||
| 					zones := retrieveZoneInformationForNode(n.Name, res.vs, hostSystemRef) | ||||
| 					framework.Logf("Found node %s as vm=%+v placed on host=%+v under zones %s in vc=%s and datacenter=%s", | ||||
| 						n.Name, vm, hostSystemRef, zones, res.vs.Config.Hostname, res.datacenter.Name()) | ||||
| 					nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), HostSystemRef: hostSystemRef, VSphere: res.vs, Zones: zones} | ||||
| 					nm.SetNodeInfo(n.Name, nodeInfo) | ||||
| 					break | ||||
| 				} | ||||
| 			} | ||||
| 			wg.Done() | ||||
| 		}() | ||||
| 	} | ||||
| 	wg.Wait() | ||||
|  | ||||
| 	if len(nm.nameToNodeInfo) != len(nodeList.Items) { | ||||
| 		return errors.New("not all nodes could be mapped to their respective vSphere instances") | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Establish rest connection to retrieve tag manager stub | ||||
| func withTagsClient(ctx context.Context, connection *VSphere, f func(c *rest.Client) error) error { | ||||
| 	c := rest.NewClient(connection.Client.Client) | ||||
| 	user := neturl.UserPassword(connection.Config.Username, connection.Config.Password) | ||||
| 	if err := c.Login(ctx, user); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	ginkgo.DeferCleanup(c.Logout) | ||||
| 	return f(c) | ||||
| } | ||||
|  | ||||
| // retrieveZoneInformationForNode walks the ancestors of the node's host and retrieves the zones in which the node is placed | ||||
| func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSystemRef types.ManagedObjectReference) []string { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	var zones []string | ||||
| 	pc := connection.Client.ServiceContent.PropertyCollector | ||||
| 	withTagsClient(ctx, connection, func(c *rest.Client) error { | ||||
| 		client := tags.NewManager(c) | ||||
| 		// Example result: ["Host", "Cluster", "Datacenter"] | ||||
| 		ancestors, err := mo.Ancestors(ctx, connection.Client, pc, hostSystemRef) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		var validAncestors []mo.ManagedEntity | ||||
| 		// Filter out only Datacenter, ClusterComputeResource and HostSystem type objects. These objects will be | ||||
| 		// in the following order ["Datacenter" < "ClusterComputeResource" < "HostSystem"] so that the highest | ||||
| 		// zone precedence will be received by the HostSystem type. | ||||
| 		for _, ancestor := range ancestors { | ||||
| 			moType := ancestor.ExtensibleManagedObject.Self.Type | ||||
| 			if moType == datacenterType || moType == clusterComputeResourceType || moType == hostSystemType { | ||||
| 				validAncestors = append(validAncestors, ancestor) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for _, ancestor := range validAncestors { | ||||
| 			var zonesAttachedToObject []string | ||||
| 			tags, err := client.ListAttachedTags(ctx, ancestor) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 			for _, value := range tags { | ||||
| 				tag, err := client.GetTag(ctx, value) | ||||
| 				if err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 				category, err := client.GetCategory(ctx, tag.CategoryID) | ||||
| 				if err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 				switch { | ||||
| 				case category.Name == "k8s-zone": | ||||
| 					framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName) | ||||
| 					zonesAttachedToObject = append(zonesAttachedToObject, tag.Name) | ||||
| 				case category.Name == "k8s-region": | ||||
| 					framework.Logf("Found %s associated with %s for %s", tag.Name, ancestor.Name, nodeName) | ||||
| 				} | ||||
| 			} | ||||
| 			// Overwrite zone information if it exists for this object | ||||
| 			if len(zonesAttachedToObject) != 0 { | ||||
| 				zones = zonesAttachedToObject | ||||
| 			} | ||||
| 		} | ||||
| 		return nil | ||||
| 	}) | ||||
| 	return zones | ||||
| } | ||||
|  | ||||
| // GenerateZoneToDatastoreMap generates a mapping of zone to datastore for easily verifying volume placement | ||||
| func (nm *NodeMapper) GenerateZoneToDatastoreMap() error { | ||||
| 	// 1. Create zone to hosts map for each VC | ||||
| 	var vcToZoneHostsMap = make(map[string](map[string][]string)) | ||||
| 	// 2. Create host to datastores map for each VC | ||||
| 	var vcToHostDatastoresMap = make(map[string](map[string][]string)) | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	// 3. Populate vcToZoneHostsMap and vcToHostDatastoresMap | ||||
| 	for _, nodeInfo := range nm.nameToNodeInfo { | ||||
| 		vc := nodeInfo.VSphere.Config.Hostname | ||||
| 		host := nodeInfo.HostSystemRef.Value | ||||
| 		for _, zone := range nodeInfo.Zones { | ||||
| 			if vcToZoneHostsMap[vc] == nil { | ||||
| 				vcToZoneHostsMap[vc] = make(map[string][]string) | ||||
| 			} | ||||
| 			// Populating vcToZoneHostsMap using the HostSystemRef and Zone fields from each NodeInfo | ||||
| 			hosts := vcToZoneHostsMap[vc][zone] | ||||
| 			hosts = append(hosts, host) | ||||
| 			vcToZoneHostsMap[vc][zone] = hosts | ||||
| 		} | ||||
| 		if vcToHostDatastoresMap[vc] == nil { | ||||
| 			vcToHostDatastoresMap[vc] = make(map[string][]string) | ||||
| 		} | ||||
| 		datastores := vcToHostDatastoresMap[vc][host] | ||||
| 		// Populating vcToHostDatastoresMap by finding out the datastores mounted on node's host | ||||
| 		datastoreRefs := nodeInfo.VSphere.GetDatastoresMountedOnHost(ctx, nodeInfo.HostSystemRef) | ||||
| 		for _, datastore := range datastoreRefs { | ||||
| 			datastores = append(datastores, datastore.Value) | ||||
| 		} | ||||
| 		vcToHostDatastoresMap[vc][host] = datastores | ||||
| 	} | ||||
| 	// 4. Populate vcToZoneDatastoresMap from vcToZoneHostsMap and vcToHostDatastoresMap | ||||
| 	for vc, zoneToHostsMap := range vcToZoneHostsMap { | ||||
| 		for zone, hosts := range zoneToHostsMap { | ||||
| 			commonDatastores := retrieveCommonDatastoresAmongHosts(hosts, vcToHostDatastoresMap[vc]) | ||||
| 			if nm.vcToZoneDatastoresMap[vc] == nil { | ||||
| 				nm.vcToZoneDatastoresMap[vc] = make(map[string][]string) | ||||
| 			} | ||||
| 			nm.vcToZoneDatastoresMap[vc][zone] = commonDatastores | ||||
| 		} | ||||
| 	} | ||||
| 	framework.Logf("Zone to datastores map : %+v", nm.vcToZoneDatastoresMap) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // retrieveCommonDatastoresAmongHosts retrieves the common datastores from the specified hosts | ||||
| func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[string][]string) []string { | ||||
| 	var datastoreCountMap = make(map[string]int) | ||||
| 	for _, host := range hosts { | ||||
| 		for _, datastore := range hostToDatastoresMap[host] { | ||||
| 			datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1 | ||||
| 		} | ||||
| 	} | ||||
| 	var commonDatastores []string | ||||
| 	numHosts := len(hosts) | ||||
| 	for datastore, count := range datastoreCountMap { | ||||
| 		if count == numHosts { | ||||
| 			commonDatastores = append(commonDatastores, datastore) | ||||
| 		} | ||||
| 	} | ||||
| 	return commonDatastores | ||||
| } | ||||
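A worked example of the counting intersection above, with placeholder names: given hosts h1 and h2, where h1 mounts {ds-a, ds-b} and h2 mounts {ds-b, ds-c}, only ds-b reaches a count equal to the number of hosts, so it is the sole common datastore:

	common := retrieveCommonDatastoresAmongHosts(
		[]string{"h1", "h2"},
		map[string][]string{
			"h1": {"ds-a", "ds-b"},
			"h2": {"ds-b", "ds-c"},
		},
	)
	// common == []string{"ds-b"}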
|  | ||||
| // GetDatastoresInZone returns all the datastores in the specified zone | ||||
| func (nm *NodeMapper) GetDatastoresInZone(vc string, zone string) []string { | ||||
| 	nm.nodeInfoRWLock.RLock() | ||||
| 	defer nm.nodeInfoRWLock.RUnlock() | ||||
| 	return nm.vcToZoneDatastoresMap[vc][zone] | ||||
| } | ||||
|  | ||||
| // GetNodeInfo returns NodeInfo for given nodeName | ||||
| func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo { | ||||
| 	nm.nodeInfoRWLock.RLock() | ||||
| 	defer nm.nodeInfoRWLock.RUnlock() | ||||
| 	return nm.nameToNodeInfo[nodeName] | ||||
| } | ||||
|  | ||||
| // SetNodeInfo sets NodeInfo for the given nodeName, taking the write lock to guard concurrent updates. | ||||
| func (nm *NodeMapper) SetNodeInfo(nodeName string, nodeInfo *NodeInfo) { | ||||
| 	nm.nodeInfoRWLock.Lock() | ||||
| 	defer nm.nodeInfoRWLock.Unlock() | ||||
| 	nm.nameToNodeInfo[nodeName] = nodeInfo | ||||
| } | ||||
| @@ -1,202 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/labels" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| // Testing configurations of a single PV/PVC pair attached to a vSphere Disk | ||||
| var _ = utils.SIGDescribe("PersistentVolumes:vsphere", feature.Vsphere, func() { | ||||
| 	var ( | ||||
| 		c          clientset.Interface | ||||
| 		ns         string | ||||
| 		volumePath string | ||||
| 		pv         *v1.PersistentVolume | ||||
| 		pvc        *v1.PersistentVolumeClaim | ||||
| 		clientPod  *v1.Pod | ||||
| 		pvConfig   e2epv.PersistentVolumeConfig | ||||
| 		pvcConfig  e2epv.PersistentVolumeClaimConfig | ||||
| 		err        error | ||||
| 		node       string | ||||
| 		volLabel   labels.Set | ||||
| 		selector   *metav1.LabelSelector | ||||
| 		nodeInfo   *NodeInfo | ||||
| 	) | ||||
|  | ||||
| 	f := framework.NewDefaultFramework("pv") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	/* | ||||
| 		Test Setup | ||||
|  | ||||
| 		1. Create volume (vmdk) | ||||
| 		2. Create PV with volume path for the vmdk. | ||||
| 		3. Create PVC to bind with PV. | ||||
| 		4. Create a POD using the PVC. | ||||
| 		5. Verify the disk is attached to the node. | ||||
| 	*/ | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		c = f.ClientSet | ||||
| 		ns = f.Namespace.Name | ||||
| 		clientPod = nil | ||||
| 		pvc = nil | ||||
| 		pv = nil | ||||
| 		nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, c) | ||||
|  | ||||
| 		volLabel = labels.Set{e2epv.VolumeSelectorKey: ns} | ||||
| 		selector = metav1.SetAsLabelSelector(volLabel) | ||||
|  | ||||
| 		volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| 		}) | ||||
| 		pvConfig = e2epv.PersistentVolumeConfig{ | ||||
| 			NamePrefix: "vspherepv-", | ||||
| 			Labels:     volLabel, | ||||
| 			PVSource: v1.PersistentVolumeSource{ | ||||
| 				VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ | ||||
| 					VolumePath: volumePath, | ||||
| 					FSType:     "ext4", | ||||
| 				}, | ||||
| 			}, | ||||
| 			Prebind: nil, | ||||
| 		} | ||||
| 		emptyStorageClass := "" | ||||
| 		pvcConfig = e2epv.PersistentVolumeClaimConfig{ | ||||
| 			Selector:         selector, | ||||
| 			StorageClassName: &emptyStorageClass, | ||||
| 		} | ||||
| 		ginkgo.By("Creating the PV and PVC") | ||||
| 		pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "AfterEach: failed to delete PV ", pv.Name) | ||||
| 		}) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name) | ||||
| 		}) | ||||
| 		framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) | ||||
|  | ||||
| 		ginkgo.By("Creating the Client Pod") | ||||
| 		clientPod, err = e2epod.CreateClientPod(ctx, c, ns, pvc) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		node = clientPod.Spec.NodeName | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name) | ||||
| 		}) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, node), "wait for vsphere disk to detach") | ||||
| 		}) | ||||
|  | ||||
| 		ginkgo.By("Verify disk should be attached to the node") | ||||
| 		isAttached, err := diskIsAttached(ctx, volumePath, node) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if !isAttached { | ||||
| 			framework.Failf("Disk %s is not attached with the node", volumePath) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func(ctx context.Context) { | ||||
| 		ginkgo.By("Deleting the Claim") | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) | ||||
| 		pvc = nil | ||||
|  | ||||
| 		ginkgo.By("Deleting the Pod") | ||||
| 		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Delete the PV and then the pod.  Expect the pod to succeed in unmounting and detaching PD on delete. | ||||
|  | ||||
| 		Test Steps: | ||||
| 		1. Delete PV. | ||||
| 		2. Delete POD, POD deletion should succeed. | ||||
| 	*/ | ||||
| 	ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vsphere volume detach", func(ctx context.Context) { | ||||
| 		ginkgo.By("Deleting the Persistent Volume") | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) | ||||
| 		pv = nil | ||||
|  | ||||
| 		ginkgo.By("Deleting the pod") | ||||
| 		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod), "Failed to delete pod ", clientPod.Name) | ||||
| 	}) | ||||
| 	/* | ||||
| 		This test verifies that a volume mounted to a pod remains mounted after the kubelet restarts. | ||||
| 		Steps: | ||||
| 		1. Write to the volume | ||||
| 		2. Restart kubelet | ||||
| 		3. Verify that the written file is accessible after the kubelet restart | ||||
| 	*/ | ||||
| 	f.It("should test that a file written to the vsphere volume mount before kubelet restart can be read after restart", f.WithDisruptive(), func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessSSHKeyPresent() | ||||
| 		utils.TestKubeletRestartsAndRestoresMount(ctx, c, f, clientPod, e2epod.VolumeMountPath1) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		This test verifies that a volume mounted to a pod that is deleted while the kubelet is down | ||||
| 		is unmounted when the kubelet returns. | ||||
|  | ||||
| 		Steps: | ||||
| 		1. Verify volume is mounted on the node. | ||||
| 		2. Stop kubelet. | ||||
| 		3. Delete pod. | ||||
| 		4. Start kubelet. | ||||
| 		5. Verify that the volume mount is no longer present. | ||||
| 	*/ | ||||
| 	f.It("should test that a vsphere volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns", f.WithDisruptive(), func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessSSHKeyPresent() | ||||
| 		utils.TestVolumeUnmountsFromDeletedPod(ctx, c, f, clientPod, e2epod.VolumeMountPath1) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		This test verifies that deleting the Namespace of a PVC and Pod causes the successful detach of Persistent Disk | ||||
|  | ||||
| 		Steps: | ||||
| 		1. Delete Namespace. | ||||
| 		2. Wait for the namespace to be deleted. (Namespace deletion should trigger deletion of the pods in it.) | ||||
| 		3. Verify the volume is detached from the node. | ||||
| 	*/ | ||||
| 	ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func(ctx context.Context) { | ||||
| 		ginkgo.By("Deleting the Namespace") | ||||
| 		err := c.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		err = framework.WaitForNamespacesDeleted(ctx, c, []string{ns}, 3*time.Minute) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Verifying Persistent Disk detaches") | ||||
| 		err = waitForVSphereDiskToDetach(ctx, volumePath, node) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,257 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	apierrors "k8s.io/apimachinery/pkg/api/errors" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| var _ = utils.SIGDescribe("PersistentVolumes", feature.Vsphere, feature.ReclaimPolicy, func() { | ||||
| 	f := framework.NewDefaultFramework("persistentvolumereclaim") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		c          clientset.Interface | ||||
| 		ns         string | ||||
| 		volumePath string | ||||
| 		pv         *v1.PersistentVolume | ||||
| 		pvc        *v1.PersistentVolumeClaim | ||||
| 		nodeInfo   *NodeInfo | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		c = f.ClientSet | ||||
| 		ns = f.Namespace.Name | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) | ||||
| 	}) | ||||
|  | ||||
| 	f.Describe("persistentvolumereclaim:vsphere", feature.Vsphere, func() { | ||||
| 		ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 			e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 			ginkgo.DeferCleanup(testCleanupVSpherePersistentVolumeReclaim, c, nodeInfo, ns, volumePath, pv, pvc) | ||||
| 			Bootstrap(f) | ||||
| 			nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, c) | ||||
| 			pv = nil | ||||
| 			pvc = nil | ||||
| 			volumePath = "" | ||||
| 		}) | ||||
|  | ||||
| 		/* | ||||
| 			This test verifies that a persistent volume is deleted when the reclaimPolicy on the PV is set to Delete and the | ||||
| 			associated claim is deleted | ||||
|  | ||||
| 			Test Steps: | ||||
| 			1. Create vmdk | ||||
| 			2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete | ||||
| 			3. Create PVC with the storage request set to PV's storage capacity. | ||||
| 			4. Wait for PV and PVC to bind. | ||||
| 			5. Delete PVC | ||||
| 			6. Verify PV is deleted automatically. | ||||
| 		*/ | ||||
| 		ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func(ctx context.Context) { | ||||
| 			var err error | ||||
| 			volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			deletePVCAfterBind(ctx, c, ns, pvc, pv, f.Timeouts) | ||||
| 			pvc = nil | ||||
|  | ||||
| 			ginkgo.By("verify pv is deleted") | ||||
| 			err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 3*time.Second, 300*time.Second) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			pv = nil | ||||
| 			volumePath = "" | ||||
| 		}) | ||||
|  | ||||
| 		/* | ||||
| 			Test Steps: | ||||
| 			1. Create vmdk | ||||
| 			2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Delete | ||||
| 			3. Create PVC with the storage request set to PV's storage capacity. | ||||
| 			4. Wait for PV and PVC to bind. | ||||
| 			5. Delete PVC. | ||||
| 			6. Verify volume is attached to the node and volume is accessible in the pod. | ||||
| 			7. Verify the PV status becomes Failed. | ||||
| 			8. Delete the pod. | ||||
| 			9. Verify the PV is detached from the node and automatically deleted. | ||||
| 		*/ | ||||
| 		ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func(ctx context.Context) { | ||||
| 			var err error | ||||
|  | ||||
| 			volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			// Wait for PV and PVC to Bind | ||||
| 			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) | ||||
|  | ||||
| 			ginkgo.By("Creating the Pod") | ||||
| 			pod, err := e2epod.CreateClientPod(ctx, c, ns, pvc) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("Deleting the Claim") | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) | ||||
| 			pvc = nil | ||||
|  | ||||
| 			// Verify the PV is still present after the PVC is deleted, and that its status becomes Failed. | ||||
| 			pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("Verify the volume is attached to the node") | ||||
| 			isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) | ||||
| 			framework.ExpectNoError(verifyDiskAttachedError) | ||||
| 			if !isVolumeAttached { | ||||
| 				framework.Failf("Disk %s is not attached to the node %s", pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) | ||||
| 			} | ||||
|  | ||||
| 			ginkgo.By("Verify the volume is accessible and available in the pod") | ||||
| 			verifyVSphereVolumesAccessible(ctx, c, pod, []*v1.PersistentVolume{pv}) | ||||
| 			framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim") | ||||
|  | ||||
| 			ginkgo.By("Deleting the Pod") | ||||
| 			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod), "Failed to delete pod ", pod.Name) | ||||
|  | ||||
| 			ginkgo.By("Verify PV is detached from the node after Pod is deleted") | ||||
| 			err = waitForVSphereDiskToDetach(ctx, pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("Verify PV should be deleted automatically") | ||||
| 			framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 1*time.Second, 30*time.Second)) | ||||
| 			pv = nil | ||||
| 			volumePath = "" | ||||
| 		}) | ||||
|  | ||||
| 		/* | ||||
| 			This test verifies that a persistent volume is retained when the reclaimPolicy on the PV is set to Retain | ||||
| 			and the associated claim is deleted | ||||
|  | ||||
| 			Test Steps: | ||||
| 			1. Create vmdk | ||||
| 			2. Create PV Spec with volume path set to VMDK file created in Step-1, and PersistentVolumeReclaimPolicy is set to Retain | ||||
| 			3. Create PVC with the storage request set to PV's storage capacity. | ||||
| 			4. Wait for PV and PVC to bind. | ||||
| 			5. Write some content in the volume. | ||||
| 			6. Delete PVC | ||||
| 			7. Verify PV is retained. | ||||
| 			8. Delete retained PV. | ||||
| 			9. Create PV Spec with the same volume path used in step 2. | ||||
| 			10. Create PVC with the storage request set to PV's storage capacity. | ||||
| 			11. Create a pod using the PVC created in Step 10 and verify the volume content matches. | ||||
| 		*/ | ||||
|  | ||||
| 		ginkgo.It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func(ctx context.Context) { | ||||
| 			var err error | ||||
| 			var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) | ||||
|  | ||||
| 			volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			writeContentToVSpherePV(ctx, c, f.Timeouts, pvc, volumeFileContent) | ||||
|  | ||||
| 			ginkgo.By("Delete PVC") | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) | ||||
| 			pvc = nil | ||||
|  | ||||
| 			ginkgo.By("Verify PV is retained") | ||||
| 			framework.Logf("Waiting for PV %v to become Released", pv.Name) | ||||
| 			err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) | ||||
|  | ||||
| 			ginkgo.By("Creating the PV for same volume path") | ||||
| 			pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) | ||||
| 			pv, err = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("creating the pvc") | ||||
| 			pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) | ||||
| 			pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("wait for the pv and pvc to bind") | ||||
| 			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc)) | ||||
| 			verifyContentOfVSpherePV(ctx, c, f.Timeouts, pvc, volumeFileContent) | ||||
|  | ||||
| 		}) | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| // testSetupVSpherePersistentVolumeReclaim creates a vmdk-backed PV and PVC for the persistentvolumereclaim tests on the vSphere provider. | ||||
| func testSetupVSpherePersistentVolumeReclaim(ctx context.Context, c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) { | ||||
| 	ginkgo.By("running testSetupVSpherePersistentVolumeReclaim") | ||||
| 	ginkgo.By("creating vmdk") | ||||
| 	volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	ginkgo.By("creating the pv") | ||||
| 	pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil) | ||||
| 	pv, err = c.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	ginkgo.By("creating the pvc") | ||||
| 	pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) | ||||
| 	pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // testCleanupVSpherePersistentVolumeReclaim deletes the volume, PV, and PVC created for the persistentvolumereclaim tests on the vSphere provider. | ||||
| func testCleanupVSpherePersistentVolumeReclaim(ctx context.Context, c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { | ||||
| 	ginkgo.By("running testCleanupVSpherePersistentVolumeReclaim") | ||||
| 	if len(volumePath) > 0 { | ||||
| 		err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	} | ||||
| 	if pv != nil { | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pv.Name), "Failed to delete PV ", pv.Name) | ||||
| 	} | ||||
| 	if pvc != nil { | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // deletePVCAfterBind waits for the PV and PVC to bind and, once bound, deletes the PVC. | ||||
| func deletePVCAfterBind(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, timeouts *framework.TimeoutContext) { | ||||
| 	var err error | ||||
|  | ||||
| 	ginkgo.By("wait for the pv and pvc to bind") | ||||
| 	framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, timeouts, ns, pv, pvc)) | ||||
|  | ||||
| 	ginkgo.By("delete pvc") | ||||
| 	framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) | ||||
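| 	// Tolerate the claim still being visible while it is being finalized; any lookup error | ||||
| 	// other than NotFound fails the test. | ||||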
| 	_, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{}) | ||||
| 	if !apierrors.IsNotFound(err) { | ||||
| 		framework.ExpectNoError(err) | ||||
| 	} | ||||
| } | ||||
| @@ -1,154 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| This is a functional test for the Selector-Label Volume Binding feature. | ||||
| The test verifies that the volume with the matching label is bound to the PVC. | ||||
|  | ||||
| Test Steps | ||||
| ---------- | ||||
| 1. Create VMDK. | ||||
| 2. Create pv with label volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete. | ||||
| 3. Create PVC (pvcVvol) with label selector to match with volume-type:vvol | ||||
| 4. Create PVC (pvcSsd) with label selector to match with volume-type:ssd | ||||
| 5. Wait and verify that pvcSsd is bound to pvSsd. | ||||
| 6. Verify the status of pvcVvol is still Pending. | ||||
| 7. Delete pvcSsd. | ||||
| 8. Verify the associated PV is also deleted. | ||||
| 9. Delete pvcVvol. | ||||
| */ | ||||
| var _ = utils.SIGDescribe("PersistentVolumes", feature.Vsphere, feature.LabelSelector, func() { | ||||
| 	f := framework.NewDefaultFramework("pvclabelselector") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		c          clientset.Interface | ||||
| 		ns         string | ||||
| 		pvSsd      *v1.PersistentVolume | ||||
| 		pvcSsd     *v1.PersistentVolumeClaim | ||||
| 		pvcVvol    *v1.PersistentVolumeClaim | ||||
| 		volumePath string | ||||
| 		ssdlabels  map[string]string | ||||
| 		vvollabels map[string]string | ||||
| 		err        error | ||||
| 		nodeInfo   *NodeInfo | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		c = f.ClientSet | ||||
| 		ns = f.Namespace.Name | ||||
| 		Bootstrap(f) | ||||
| 		nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, c) | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) | ||||
| 		ssdlabels = make(map[string]string) | ||||
| 		ssdlabels["volume-type"] = "ssd" | ||||
| 		vvollabels = make(map[string]string) | ||||
| 		vvollabels["volume-type"] = "vvol" | ||||
|  | ||||
| 	}) | ||||
|  | ||||
| 	f.Describe("Selector-Label Volume Binding:vsphere", feature.Vsphere, func() { | ||||
| 		ginkgo.AfterEach(func(ctx context.Context) { | ||||
| 			ginkgo.By("Running clean up actions") | ||||
| 			if framework.ProviderIs("vsphere") { | ||||
| 				testCleanupVSpherePVClabelselector(ctx, c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol) | ||||
| 			} | ||||
| 		}) | ||||
| 		ginkgo.It("should bind volume with claim for given label", func(ctx context.Context) { | ||||
| 			volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(ctx, c, nodeInfo, ns, ssdlabels, vvollabels) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("wait for the pvcSsd to bind with pvSsd") | ||||
| 			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pvSsd, pvcSsd)) | ||||
|  | ||||
| 			ginkgo.By("Verify status of pvcVvol is pending") | ||||
| 			err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By("delete pvcSsd") | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name) | ||||
|  | ||||
| 			ginkgo.By("verify pvSsd is deleted") | ||||
| 			err = e2epv.WaitForPersistentVolumeDeleted(ctx, c, pvSsd.Name, 3*time.Second, 300*time.Second) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			volumePath = "" | ||||
|  | ||||
| 			ginkgo.By("delete pvcVvol") | ||||
| 			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name) | ||||
| 		}) | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func testSetupVSpherePVClabelselector(ctx context.Context, c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim, err error) { | ||||
| 	ginkgo.By("creating vmdk") | ||||
| 	volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("creating the pv with label volume-type:ssd") | ||||
| 	pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels) | ||||
| 	pvSsd, err = c.CoreV1().PersistentVolumes().Create(ctx, pvSsd, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("creating pvc with label selector to match with volume-type:vvol") | ||||
| 	pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels) | ||||
| 	pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvcVvol, metav1.CreateOptions{}) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("creating pvc with label selector to match with volume-type:ssd") | ||||
| 	pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels) | ||||
| 	pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvcSsd, metav1.CreateOptions{}) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func testCleanupVSpherePVClabelselector(ctx context.Context, c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim) { | ||||
| 	ginkgo.By("running testCleanupVSpherePVClabelselector") | ||||
| 	if len(volumePath) > 0 { | ||||
| 		nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| 	} | ||||
| 	if pvcSsd != nil { | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name) | ||||
| 	} | ||||
| 	if pvcVvol != nil { | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name) | ||||
| 	} | ||||
| 	if pvSsd != nil { | ||||
| 		framework.ExpectNoError(e2epv.DeletePersistentVolume(ctx, c, pvSsd.Name), "Failed to delete PV ", pvSsd.Name) | ||||
| 	} | ||||
| } | ||||
| @@ -1,269 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/vmware/govmomi" | ||||
| 	"github.com/vmware/govmomi/find" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	"github.com/vmware/govmomi/vim25/soap" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	volDir                    = "kubevols" | ||||
| 	defaultDiskCapacityKB     = 2097152 | ||||
| 	defaultDiskFormat         = "thin" | ||||
| 	defaultSCSIControllerType = "lsiLogic" | ||||
| 	virtualMachineType        = "VirtualMachine" | ||||
| ) | ||||
|  | ||||
| // VSphere represents a vSphere instance where one or more kubernetes nodes are running. | ||||
| type VSphere struct { | ||||
| 	Config *Config | ||||
| 	Client *govmomi.Client | ||||
| } | ||||
|  | ||||
| // VolumeOptions specifies various options for a volume. | ||||
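| // Fields left at their zero value are filled in with package defaults by initVolumeOptions when CreateVolume is called. | ||||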
| type VolumeOptions struct { | ||||
| 	Name               string | ||||
| 	CapacityKB         int | ||||
| 	DiskFormat         string | ||||
| 	SCSIControllerType string | ||||
| 	Datastore          string | ||||
| } | ||||
|  | ||||
| // GetDatacenter returns the DataCenter Object for the given datacenterPath | ||||
| func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) { | ||||
| 	Connect(ctx, vs) | ||||
| 	finder := find.NewFinder(vs.Client.Client, false) | ||||
| 	return finder.Datacenter(ctx, datacenterPath) | ||||
| } | ||||
|  | ||||
| // GetDatacenterFromObjectReference returns the DataCenter Object for the given datacenter reference | ||||
| func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter { | ||||
| 	Connect(ctx, vs) | ||||
| 	return object.NewDatacenter(vs.Client.Client, dc.Reference()) | ||||
| } | ||||
|  | ||||
| // GetAllDatacenter returns all the DataCenter Objects | ||||
| func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) { | ||||
| 	Connect(ctx, vs) | ||||
| 	finder := find.NewFinder(vs.Client.Client, false) | ||||
| 	return finder.DatacenterList(ctx, "*") | ||||
| } | ||||
|  | ||||
| // GetVMByUUID returns the VM object Reference from the given vmUUID | ||||
| func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) { | ||||
| 	Connect(ctx, vs) | ||||
| 	datacenter := vs.GetDatacenterFromObjectReference(ctx, dc) | ||||
| 	s := object.NewSearchIndex(vs.Client.Client) | ||||
| 	vmUUID = strings.ToLower(strings.TrimSpace(vmUUID)) | ||||
| 	return s.FindByUuid(ctx, datacenter, vmUUID, true, nil) | ||||
| } | ||||
|  | ||||
| // GetHostFromVMReference returns host object reference of the host on which the specified VM resides | ||||
| func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedObjectReference) types.ManagedObjectReference { | ||||
| 	Connect(ctx, vs) | ||||
| 	var vmMo mo.VirtualMachine | ||||
| 	vs.Client.RetrieveOne(ctx, vm, []string{"summary.runtime.host"}, &vmMo) | ||||
| 	host := *vmMo.Summary.Runtime.Host | ||||
| 	return host | ||||
| } | ||||
|  | ||||
| // GetDatastoresMountedOnHost returns the datastore references of all the datastores mounted on the specified host | ||||
| func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.ManagedObjectReference) []types.ManagedObjectReference { | ||||
| 	Connect(ctx, vs) | ||||
| 	var hostMo mo.HostSystem | ||||
| 	vs.Client.RetrieveOne(ctx, host, []string{"datastore"}, &hostMo) | ||||
| 	return hostMo.Datastore | ||||
| } | ||||
|  | ||||
| // GetDatastoreRefFromName returns the datastore reference of the specified datastore | ||||
| func (vs *VSphere) GetDatastoreRefFromName(ctx context.Context, dc object.Reference, datastoreName string) (types.ManagedObjectReference, error) { | ||||
| 	Connect(ctx, vs) | ||||
| 	datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference()) | ||||
| 	finder := find.NewFinder(vs.Client.Client, false) | ||||
| 	finder.SetDatacenter(datacenter) | ||||
| 	datastore, err := finder.Datastore(ctx, datastoreName) | ||||
| 	return datastore.Reference(), err | ||||
| } | ||||
|  | ||||
| // GetFolderByPath gets the Folder Object Reference from the given folder path | ||||
| // folderPath should be the full path to folder | ||||
| func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) { | ||||
| 	Connect(ctx, vs) | ||||
| 	datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference()) | ||||
| 	finder := find.NewFinder(datacenter.Client(), false) | ||||
| 	finder.SetDatacenter(datacenter) | ||||
| 	vmFolder, err := finder.Folder(ctx, folderPath) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err) | ||||
| 		return vmFolderMor, err | ||||
| 	} | ||||
| 	return vmFolder.Reference(), nil | ||||
| } | ||||
|  | ||||
| // CreateVolume creates a vsphere volume using the volume parameters specified in VolumeOptions. | ||||
| // If the volume is created successfully, the canonical disk path is returned; otherwise an error is returned. | ||||
| func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	Connect(ctx, vs) | ||||
| 	datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef) | ||||
| 	var ( | ||||
| 		err                     error | ||||
| 		directoryAlreadyPresent = false | ||||
| 	) | ||||
| 	if datacenter == nil { | ||||
| 		return "", fmt.Errorf("datacenter is nil") | ||||
| 	} | ||||
| 	vs.initVolumeOptions(volumeOptions) | ||||
| 	finder := find.NewFinder(datacenter.Client(), false) | ||||
| 	finder.SetDatacenter(datacenter) | ||||
| 	ds, err := finder.Datastore(ctx, volumeOptions.Datastore) | ||||
| 	if err != nil { | ||||
| 		return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err) | ||||
| 	} | ||||
| 	directoryPath := filepath.Clean(ds.Path(volDir)) + "/" | ||||
| 	fileManager := object.NewFileManager(ds.Client()) | ||||
| 	err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false) | ||||
| 	if err != nil { | ||||
| 		if soap.IsSoapFault(err) { | ||||
| 			soapFault := soap.ToSoapFault(err) | ||||
| 			if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok { | ||||
| 				directoryAlreadyPresent = true | ||||
| 				framework.Logf("Directory with the path %+q is already present", directoryPath) | ||||
| 			} | ||||
| 		} | ||||
| 		if !directoryAlreadyPresent { | ||||
| 			framework.Logf("Cannot create dir %#v. err %s", directoryPath, err) | ||||
| 			return "", err | ||||
| 		} | ||||
| 	} | ||||
| 	framework.Logf("Created dir with path as %+q", directoryPath) | ||||
| 	vmdkPath := directoryPath + volumeOptions.Name + ".vmdk" | ||||
|  | ||||
| 	// Create a virtual disk manager | ||||
| 	vdm := object.NewVirtualDiskManager(ds.Client()) | ||||
| 	// Create specification for new virtual disk | ||||
| 	vmDiskSpec := &types.FileBackedVirtualDiskSpec{ | ||||
| 		VirtualDiskSpec: types.VirtualDiskSpec{ | ||||
| 			AdapterType: volumeOptions.SCSIControllerType, | ||||
| 			DiskType:    volumeOptions.DiskFormat, | ||||
| 		}, | ||||
| 		CapacityKb: int64(volumeOptions.CapacityKB), | ||||
| 	} | ||||
| 	// Create virtual disk | ||||
| 	task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	taskInfo, err := task.WaitForResult(ctx, nil) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	volumePath := taskInfo.Result.(string) | ||||
| 	canonicalDiskPath, err := getCanonicalVolumePath(ctx, datacenter, volumePath) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	return canonicalDiskPath, nil | ||||
| } | ||||
|  | ||||
| // DeleteVolume deletes the vmdk file specified by volumePath. | ||||
| // If an error is encountered while deleting the volume, it is returned. | ||||
| func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	Connect(ctx, vs) | ||||
|  | ||||
| 	datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef) | ||||
| 	virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client()) | ||||
| 	diskPath := removeStorageClusterORFolderNameFromVDiskPath(volumePath) | ||||
| 	// Delete virtual disk | ||||
| 	task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to delete virtual disk. err: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	err = task.Wait(ctx) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to delete virtual disk. err: %v", err) | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // IsVMPresent checks whether a VM with the name specified in the vmName argument is present in the vCenter inventory. | ||||
| // If the VM is present, the function returns true; otherwise it returns false. | ||||
| func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) { | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	defer cancel() | ||||
| 	Connect(ctx, vs) | ||||
| 	folderMor, err := vs.GetFolderByPath(ctx, dataCenterRef, vs.Config.Folder) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	vmFolder := object.NewFolder(vs.Client.Client, folderMor) | ||||
| 	vmFoldersChildren, err := vmFolder.Children(ctx) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err) | ||||
| 		return | ||||
| 	} | ||||
| 	for _, vmFoldersChild := range vmFoldersChildren { | ||||
| 		if vmFoldersChild.Reference().Type == virtualMachineType { | ||||
| 			if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName { | ||||
| 				return true, nil | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // initVolumeOptions sets default values for any volumeOptions parameters that are not set. | ||||
| func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) { | ||||
| 	if volumeOptions == nil { | ||||
| 		volumeOptions = &VolumeOptions{} | ||||
| 	} | ||||
| 	if volumeOptions.Datastore == "" { | ||||
| 		volumeOptions.Datastore = vs.Config.DefaultDatastore | ||||
| 	} | ||||
| 	if volumeOptions.CapacityKB == 0 { | ||||
| 		volumeOptions.CapacityKB = defaultDiskCapacityKB | ||||
| 	} | ||||
| 	if volumeOptions.Name == "" { | ||||
| 		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10) | ||||
| 	} | ||||
| 	if volumeOptions.DiskFormat == "" { | ||||
| 		volumeOptions.DiskFormat = defaultDiskFormat | ||||
| 	} | ||||
| 	if volumeOptions.SCSIControllerType == "" { | ||||
| 		volumeOptions.SCSIControllerType = defaultSCSIControllerType | ||||
| 	} | ||||
| } | ||||
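|  | ||||
| // Example (not part of the original file): a minimal sketch of how the e2e tests typically | ||||
| // drive CreateVolume and DeleteVolume, assuming Bootstrap has populated a NodeInfo with a | ||||
| // connected VSphere instance and a datacenter reference. The function name and the nodeInfo | ||||
| // wiring are illustrative assumptions, not part of the original helpers. | ||||
| func exampleScratchVolumeLifecycle(nodeInfo *NodeInfo) error { | ||||
| 	// An empty VolumeOptions lets initVolumeOptions fill in the default datastore, | ||||
| 	// capacity, name, disk format and SCSI controller type declared above. | ||||
| 	volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	// The canonical path returned by CreateVolume is what the tests pass to DeleteVolume | ||||
| 	// and to a PV spec's VsphereVolume.VolumePath field. | ||||
| 	return nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| } | ||||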
| @@ -1,129 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"os" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/onsi/gomega" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| ) | ||||
|  | ||||
| // VSphereCSIMigrationEnabled is the environment variable that helps | ||||
| // determine the test verification flow. | ||||
| const VSphereCSIMigrationEnabled = "VSPHERE_CSI_MIGRATION_ENABLED" | ||||
|  | ||||
| // environment variables related to datastore parameters | ||||
| const ( | ||||
| 	SPBMPolicyName             = "VSPHERE_SPBM_POLICY_NAME" | ||||
| 	StorageClassDatastoreName  = "VSPHERE_DATASTORE" | ||||
| 	SecondSharedDatastore      = "VSPHERE_SECOND_SHARED_DATASTORE" | ||||
| 	KubernetesClusterName      = "VSPHERE_KUBERNETES_CLUSTER" | ||||
| 	SPBMTagPolicy              = "VSPHERE_SPBM_TAG_POLICY" | ||||
| 	VCPClusterDatastore        = "CLUSTER_DATASTORE" | ||||
| 	SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER" | ||||
| ) | ||||
|  | ||||
| // environment variables used for scaling tests | ||||
| const ( | ||||
| 	VCPScaleVolumeCount   = "VCP_SCALE_VOLUME_COUNT" | ||||
| 	VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD" | ||||
| 	VCPScaleInstances     = "VCP_SCALE_INSTANCES" | ||||
| ) | ||||
|  | ||||
| // environment variables used for stress tests | ||||
| const ( | ||||
| 	VCPStressInstances  = "VCP_STRESS_INSTANCES" | ||||
| 	VCPStressIterations = "VCP_STRESS_ITERATIONS" | ||||
| ) | ||||
|  | ||||
| // environment variables used for performance tests | ||||
| const ( | ||||
| 	VCPPerfVolumeCount   = "VCP_PERF_VOLUME_COUNT" | ||||
| 	VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD" | ||||
| 	VCPPerfIterations    = "VCP_PERF_ITERATIONS" | ||||
| ) | ||||
|  | ||||
| // environment variables used for zone tests | ||||
| const ( | ||||
| 	VCPZoneVsanDatastore1      = "VCP_ZONE_VSANDATASTORE1" | ||||
| 	VCPZoneVsanDatastore2      = "VCP_ZONE_VSANDATASTORE2" | ||||
| 	VCPZoneLocalDatastore      = "VCP_ZONE_LOCALDATASTORE" | ||||
| 	VCPZoneCompatPolicyName    = "VCP_ZONE_COMPATPOLICY_NAME" | ||||
| 	VCPZoneNonCompatPolicyName = "VCP_ZONE_NONCOMPATPOLICY_NAME" | ||||
| 	VCPZoneA                   = "VCP_ZONE_A" | ||||
| 	VCPZoneB                   = "VCP_ZONE_B" | ||||
| 	VCPZoneC                   = "VCP_ZONE_C" | ||||
| 	VCPZoneD                   = "VCP_ZONE_D" | ||||
| 	VCPInvalidZone             = "VCP_INVALID_ZONE" | ||||
| ) | ||||
|  | ||||
| // storage class parameters | ||||
| const ( | ||||
| 	Datastore                    = "datastore" | ||||
| 	PolicyDiskStripes            = "diskStripes" | ||||
| 	PolicyHostFailuresToTolerate = "hostFailuresToTolerate" | ||||
| 	PolicyCacheReservation       = "cacheReservation" | ||||
| 	PolicyObjectSpaceReservation = "objectSpaceReservation" | ||||
| 	PolicyIopsLimit              = "iopsLimit" | ||||
| 	DiskFormat                   = "diskformat" | ||||
| 	SpbmStoragePolicy            = "storagepolicyname" | ||||
| ) | ||||
|  | ||||
| // test values for storage class parameters | ||||
| const ( | ||||
| 	ThinDisk                                   = "thin" | ||||
| 	BronzeStoragePolicy                        = "bronze" | ||||
| 	HostFailuresToTolerateCapabilityVal        = "0" | ||||
| 	CacheReservationCapabilityVal              = "20" | ||||
| 	DiskStripesCapabilityVal                   = "1" | ||||
| 	ObjectSpaceReservationCapabilityVal        = "30" | ||||
| 	IopsLimitCapabilityVal                     = "100" | ||||
| 	StripeWidthCapabilityVal                   = "2" | ||||
| 	DiskStripesCapabilityInvalidVal            = "14" | ||||
| 	HostFailuresToTolerateCapabilityInvalidVal = "4" | ||||
| ) | ||||
|  | ||||
| // GetAndExpectStringEnvVar returns the string value of an environment variable or fails if | ||||
| // the variable is not set | ||||
| func GetAndExpectStringEnvVar(varName string) string { | ||||
| 	varValue := os.Getenv(varName) | ||||
| 	gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set") | ||||
| 	return varValue | ||||
| } | ||||
|  | ||||
| // GetAndExpectIntEnvVar returns the integer value of an environment variable or fails if | ||||
| // the variable is not set | ||||
| func GetAndExpectIntEnvVar(varName string) int { | ||||
| 	varValue := GetAndExpectStringEnvVar(varName) | ||||
| 	varIntValue, err := strconv.Atoi(varValue) | ||||
| 	framework.ExpectNoError(err, "Error Parsing "+varName) | ||||
| 	return varIntValue | ||||
| } | ||||
|  | ||||
| // GetAndExpectBoolEnvVar returns the bool value of an environment variable. | ||||
| // If the environment variable is not set, it returns false. | ||||
| func GetAndExpectBoolEnvVar(varName string) bool { | ||||
| 	varValue := os.Getenv(varName) | ||||
| 	if varValue == "" { | ||||
| 		return false | ||||
| 	} | ||||
| 	varBoolValue, err := strconv.ParseBool(varValue) | ||||
| 	framework.ExpectNoError(err, "Error Parsing "+varName) | ||||
| 	return varBoolValue | ||||
| } | ||||
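|  | ||||
| // Example (not part of the original file): a minimal sketch of how the scale tests read their | ||||
| // configuration with the helpers above. The function name is an illustrative assumption; the | ||||
| // environment variable keys are the constants defined in this file. | ||||
| func exampleReadScaleTestConfig() (volumeCount, volumesPerPod, instances int, policyName string) { | ||||
| 	// Each helper fails the test if the variable is missing or cannot be parsed. | ||||
| 	volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount) | ||||
| 	volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod) | ||||
| 	instances = GetAndExpectIntEnvVar(VCPScaleInstances) | ||||
| 	policyName = GetAndExpectStringEnvVar(SPBMPolicyName) | ||||
| 	return | ||||
| } | ||||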
| @@ -1,243 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| Perform vSphere volume life cycle management at scale, based on a user-configurable number of volumes. | ||||
| The following actions will be performed as part of this test. | ||||
|  | ||||
| 1. Create Storage Classes of 4 Categories (Default, SC with Non Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities.) | ||||
| 2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from System Environment. | ||||
| 3. Launch VCP_SCALE_INSTANCES goroutines that together create VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes. | ||||
| 4. Read VCP_SCALE_VOLUMES_PER_POD from System Environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it. | ||||
| 5. Once all the go routines are completed, we delete all the pods and volumes. | ||||
| */ | ||||
| const ( | ||||
| 	NodeLabelKey = "vsphere_e2e_label" | ||||
| ) | ||||
|  | ||||
| // NodeSelector holds the label key and value used to schedule pods onto a specific node. | ||||
| type NodeSelector struct { | ||||
| 	labelKey   string | ||||
| 	labelValue string | ||||
| } | ||||
|  | ||||
| var _ = utils.SIGDescribe("vcp at scale", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("vcp-at-scale") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
|  | ||||
| 	var ( | ||||
| 		client            clientset.Interface | ||||
| 		namespace         string | ||||
| 		nodeSelectorList  []*NodeSelector | ||||
| 		volumeCount       int | ||||
| 		numberOfInstances int | ||||
| 		volumesPerPod     int | ||||
| 		policyName        string | ||||
| 		datastoreName     string | ||||
| 		nodeVolumeMapChan chan map[string][]string | ||||
| 		nodes             *v1.NodeList | ||||
| 		scNames           = []string{storageclass1, storageclass2, storageclass3, storageclass4} | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		nodeVolumeMapChan = make(chan map[string][]string) | ||||
|  | ||||
| 		// Read the environment variables | ||||
| 		volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount) | ||||
| 		volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod) | ||||
|  | ||||
| 		numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances) | ||||
| 		if numberOfInstances > 5 { | ||||
| 			framework.Failf("At most 5 instances are allowed, got: %v", numberOfInstances) | ||||
| 		} | ||||
| 		if numberOfInstances > volumeCount { | ||||
| 			framework.Failf("Number of instances: %v cannot be greater than volume count: %v", numberOfInstances, volumeCount) | ||||
| 		} | ||||
|  | ||||
| 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName) | ||||
| 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) | ||||
|  | ||||
| 		var err error | ||||
| 		nodes, err = e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if len(nodes.Items) < 2 { | ||||
| 			e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) | ||||
| 		} | ||||
| 		// Verify volume count specified by the user can be satisfied | ||||
| 		if volumeCount > volumesPerNode*len(nodes.Items) { | ||||
| 			e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items)) | ||||
| 		} | ||||
| 		nodeSelectorList = createNodeLabels(client, namespace, nodes) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			for _, node := range nodes.Items { | ||||
| 				e2enode.RemoveLabelOffNode(client, node.Name, NodeLabelKey) | ||||
| 			} | ||||
| 		}) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("vsphere scale tests", func(ctx context.Context) { | ||||
| 		var pvcClaimList []string | ||||
| 		nodeVolumeMap := make(map[string][]string) | ||||
| 		// Volumes will be provisioned with each of the different types of Storage Class | ||||
| 		scArrays := make([]*storagev1.StorageClass, len(scNames)) | ||||
| 		for index, scname := range scNames { | ||||
| 			// Create vSphere Storage Class | ||||
| 			ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname)) | ||||
| 			var sc *storagev1.StorageClass | ||||
| 			scParams := make(map[string]string) | ||||
| 			var err error | ||||
| 			switch scname { | ||||
| 			case storageclass1: | ||||
| 				scParams = nil | ||||
| 			case storageclass2: | ||||
| 				scParams[PolicyHostFailuresToTolerate] = "1" | ||||
| 			case storageclass3: | ||||
| 				scParams[SpbmStoragePolicy] = policyName | ||||
| 			case storageclass4: | ||||
| 				scParams[Datastore] = datastoreName | ||||
| 			} | ||||
| 			sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{}) | ||||
| 			gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") | ||||
| 			framework.ExpectNoError(err, "Failed to create storage class") | ||||
| 			ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, scname, metav1.DeleteOptions{}) | ||||
| 			scArrays[index] = sc | ||||
| 		} | ||||
|  | ||||
| 		volumeCountPerInstance := volumeCount / numberOfInstances | ||||
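| 		// Integer division may leave a remainder; the last instance below picks up all | ||||
| 		// remaining volumes so that exactly volumeCount volumes are created in total. | ||||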
| 		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ { | ||||
| 			if instanceCount == numberOfInstances-1 { | ||||
| 				volumeCountPerInstance = volumeCount | ||||
| 			} | ||||
| 			volumeCount = volumeCount - volumeCountPerInstance | ||||
| 			go VolumeCreateAndAttach(ctx, f, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan) | ||||
| 		} | ||||
|  | ||||
| 		// Get the list of all volumes attached to each node from the goroutines by reading the data from the channel | ||||
| 		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ { | ||||
| 			for node, volumeList := range <-nodeVolumeMapChan { | ||||
| 				nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...) | ||||
| 			} | ||||
| 		} | ||||
| 		podList, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) | ||||
| 		framework.ExpectNoError(err, "Failed to list pods") | ||||
| 		for _, pod := range podList.Items { | ||||
| 			pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) | ||||
| 			ginkgo.By("Deleting pod") | ||||
| 			err = e2epod.DeletePodWithWait(ctx, client, &pod) | ||||
| 			framework.ExpectNoError(err) | ||||
| 		} | ||||
| 		ginkgo.By("Waiting for volumes to be detached from the node") | ||||
| 		err = waitForVSphereDisksToDetach(ctx, nodeVolumeMap) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		for _, pvcClaim := range pvcClaimList { | ||||
| 			err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvcClaim, namespace) | ||||
| 			framework.ExpectNoError(err) | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| // getClaimsForPod returns the PVC claim names for the volumes of the given pod. | ||||
| func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string { | ||||
| 	pvcClaimList := make([]string, volumesPerPod) | ||||
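| 	// The slice is pre-sized to volumesPerPod; only volumes backed by a PersistentVolumeClaim | ||||
| 	// contribute a claim name, keyed by their position in pod.Spec.Volumes. | ||||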
| 	for i, volumespec := range pod.Spec.Volumes { | ||||
| 		if volumespec.PersistentVolumeClaim != nil { | ||||
| 			pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName | ||||
| 		} | ||||
| 	} | ||||
| 	return pvcClaimList | ||||
| } | ||||
|  | ||||
| // VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale. | ||||
| func VolumeCreateAndAttach(ctx context.Context, f *framework.Framework, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) { | ||||
| 	defer ginkgo.GinkgoRecover() | ||||
| 	client := f.ClientSet | ||||
| 	namespace := f.Namespace.Name | ||||
| 	nodeVolumeMap := make(map[string][]string) | ||||
| 	nodeSelectorIndex := 0 | ||||
| 	for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod { | ||||
| 		if (volumeCountPerInstance - index) < volumesPerPod { | ||||
| 			volumesPerPod = volumeCountPerInstance - index | ||||
| 		} | ||||
| 		pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod) | ||||
| 		for i := 0; i < volumesPerPod; i++ { | ||||
| 			ginkgo.By("Creating PVC using the Storage Class") | ||||
| 			pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)])) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			pvclaims[i] = pvclaim | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Creating pod to attach PV to the node") | ||||
| 		nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)] | ||||
| 		// Create pod to attach Volume to Node | ||||
| 		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		for _, pv := range persistentvolumes { | ||||
| 			nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) | ||||
| 		} | ||||
| 		ginkgo.By("Verify the volume is accessible and available in the pod") | ||||
| 		verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) | ||||
| 		nodeSelectorIndex++ | ||||
| 	} | ||||
| 	nodeVolumeMapChan <- nodeVolumeMap | ||||
| 	close(nodeVolumeMapChan) | ||||
| } | ||||
|  | ||||
| func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector { | ||||
| 	var nodeSelectorList []*NodeSelector | ||||
| 	for i, node := range nodes.Items { | ||||
| 		labelVal := "vsphere_e2e_" + strconv.Itoa(i) | ||||
| 		nodeSelector := &NodeSelector{ | ||||
| 			labelKey:   NodeLabelKey, | ||||
| 			labelValue: labelVal, | ||||
| 		} | ||||
| 		nodeSelectorList = append(nodeSelectorList, nodeSelector) | ||||
| 		e2enode.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal) | ||||
| 	} | ||||
| 	return nodeSelectorList | ||||
| } | ||||
| @@ -1,164 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	apierrors "k8s.io/apimachinery/pkg/api/errors" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| 	The test performs the following operations | ||||
|  | ||||
| 	Steps | ||||
| 	1. Create a storage class with thin diskformat. | ||||
| 	2. Create nginx service. | ||||
| 	3. Create nginx statefulsets with 3 replicas. | ||||
| 	4. Wait until all Pods are ready and PVCs are bound to PVs. | ||||
| 	5. Verify volumes are accessible in all statefulset pods by creating an empty file. | ||||
| 	6. Scale down statefulsets to 2 replicas. | ||||
| 	7. Scale up statefulsets to 4 replicas. | ||||
| 	8. Scale down statefulsets to 0 replicas and delete all pods. | ||||
| 	9. Delete all PVCs from the test namespace. | ||||
| 	10. Delete the storage class. | ||||
| */ | ||||
|  | ||||
| const ( | ||||
| 	manifestPath     = "test/e2e/testing-manifests/statefulset/nginx" | ||||
| 	mountPath        = "/usr/share/nginx/html" | ||||
| 	storageclassname = "nginx-sc" | ||||
| ) | ||||
|  | ||||
| var _ = utils.SIGDescribe("vsphere statefulset", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("vsphere-statefulset") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		namespace string | ||||
| 		client    clientset.Interface | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func() { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		namespace = f.Namespace.Name | ||||
| 		client = f.ClientSet | ||||
| 		Bootstrap(f) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("vsphere statefulset testing", func(ctx context.Context) { | ||||
| 		ginkgo.By("Creating StorageClass for Statefulset") | ||||
| 		scParameters := make(map[string]string) | ||||
| 		scParameters["diskformat"] = "thin" | ||||
| 		scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "") | ||||
| 		sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 		ginkgo.By("Creating statefulset") | ||||
|  | ||||
| 		statefulset := e2estatefulset.CreateStatefulSet(ctx, client, manifestPath, namespace) | ||||
| 		ginkgo.DeferCleanup(e2estatefulset.DeleteAllStatefulSets, client, namespace) | ||||
| 		replicas := *(statefulset.Spec.Replicas) | ||||
| 		// Waiting for pods status to be Ready | ||||
| 		e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) | ||||
| 		framework.ExpectNoError(e2estatefulset.CheckMount(ctx, client, statefulset, mountPath)) | ||||
| 		ssPodsBeforeScaleDown := e2estatefulset.GetPodList(ctx, client, statefulset) | ||||
| 		gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) | ||||
| 		gomega.Expect(ssPodsBeforeScaleDown.Items).To(gomega.HaveLen(int(replicas)), "Number of Pods in the statefulset should match the number of replicas") | ||||
|  | ||||
| 		// Get the list of Volumes attached to Pods before scale down | ||||
| 		volumesBeforeScaleDown := make(map[string]string) | ||||
| 		for _, sspod := range ssPodsBeforeScaleDown.Items { | ||||
| 			_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			for _, volumespec := range sspod.Spec.Volumes { | ||||
| 				if volumespec.PersistentVolumeClaim != nil { | ||||
| 					volumePath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) | ||||
| 					volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Scaling down the statefulset to %v replicas", replicas-1)) | ||||
| 		_, scaledownErr := e2estatefulset.Scale(ctx, client, statefulset, replicas-1) | ||||
| 		framework.ExpectNoError(scaledownErr) | ||||
| 		e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas-1) | ||||
|  | ||||
| 		// After scale down, verify vsphere volumes are detached from deleted pods | ||||
| 		ginkgo.By("Verify volumes are detached from nodes after the statefulset is scaled down") | ||||
| 		for _, sspod := range ssPodsBeforeScaleDown.Items { | ||||
| 			_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) | ||||
| 			if err != nil { | ||||
| 				if !apierrors.IsNotFound(err) { | ||||
| 					framework.Failf("Error in getting Pod %s: %v", sspod.Name, err) | ||||
| 				} | ||||
| 				for _, volumespec := range sspod.Spec.Volumes { | ||||
| 					if volumespec.PersistentVolumeClaim != nil { | ||||
| 						vSpherediskPath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) | ||||
| 						framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName) | ||||
| 						framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, vSpherediskPath, sspod.Spec.NodeName)) | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Scaling up the statefulset to %v replicas", replicas)) | ||||
| 		_, scaleupErr := e2estatefulset.Scale(ctx, client, statefulset, replicas) | ||||
| 		framework.ExpectNoError(scaleupErr) | ||||
| 		e2estatefulset.WaitForStatusReplicas(ctx, client, statefulset, replicas) | ||||
| 		e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas) | ||||
|  | ||||
| 		ssPodsAfterScaleUp := e2estatefulset.GetPodList(ctx, client, statefulset) | ||||
| 		gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) | ||||
| 		gomega.Expect(ssPodsAfterScaleUp.Items).To(gomega.HaveLen(int(replicas)), "Number of Pods in the statefulset should match the number of replicas") | ||||
|  | ||||
| 		// After scale up, verify all vsphere volumes are attached to node VMs. | ||||
| 		ginkgo.By("Verify all volumes are attached to nodes after the statefulset is scaled up") | ||||
| 		for _, sspod := range ssPodsAfterScaleUp.Items { | ||||
| 			err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, sspod.Name, statefulset.Namespace, framework.PodStartTimeout) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			for _, volumespec := range pod.Spec.Volumes { | ||||
| 				if volumespec.PersistentVolumeClaim != nil { | ||||
| 					vSpherediskPath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) | ||||
| 					framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) | ||||
| 					// Verify scale up has re-attached the same volumes and not introduced new volumes | ||||
| 					if volumesBeforeScaleDown[vSpherediskPath] == "" { | ||||
| 						framework.Failf("Volume: %q was not attached to the Node: %q before scale down", vSpherediskPath, sspod.Spec.NodeName) | ||||
| 					} | ||||
| 					isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, vSpherediskPath, sspod.Spec.NodeName) | ||||
| 					if !isVolumeAttached { | ||||
| 						framework.Failf("Volume: %q is not attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) | ||||
| 					} | ||||
| 					framework.ExpectNoError(verifyDiskAttachedError) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,190 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| Induce stress by creating volumes in parallel with multiple goroutines, based on user-configurable values for the number of goroutines and iterations per goroutine. | ||||
| The following actions will be performed as part of this test. | ||||
|  | ||||
| 1. Create storage classes of 4 categories (default, SC with non-default datastore, SC with SPBM policy, SC with VSAN storage capabilities). | ||||
| 2. Read VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from the system environment (example values are sketched after this comment). | ||||
| 3. Launch one goroutine per instance for volume lifecycle operations. | ||||
| 4. Each goroutine iterates n times, where n is read from the system env VCP_STRESS_ITERATIONS. | ||||
| 5. Each iteration creates 1 PVC and 1 pod using the provisioned PV, verifies the disk is attached to the node, verifies the pod can access the volume, deletes the pod and finally deletes the PVC. | ||||
| */ | ||||
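| // Example environment for this test (hypothetical values; set before running the suite): | ||||
| // | ||||
| //	export VCP_STRESS_INSTANCES=12 | ||||
| //	export VCP_STRESS_ITERATIONS=10 | ||||
| //	export VSPHERE_SPBM_POLICY_NAME=gold-policy | ||||
| //	export VSPHERE_DATASTORE=sharedVmfs-0 | ||||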
| var _ = utils.SIGDescribe("vsphere cloud provider stress", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("vcp-stress") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client        clientset.Interface | ||||
| 		namespace     string | ||||
| 		instances     int | ||||
| 		iterations    int | ||||
| 		policyName    string | ||||
| 		datastoreName string | ||||
| 		scNames       = []string{storageclass1, storageclass2, storageclass3, storageclass4} | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
|  | ||||
| 		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		// If VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS = 10, 12 goroutines run in parallel for 10 iterations each, | ||||
| 		// resulting in 120 volume and pod creations. Volumes are provisioned with each of the different types of storage class. | ||||
| 		// Each iteration creates a PVC, verifies the PV is provisioned, creates a pod, verifies the volume is attached to the node, and then deletes the pod and the PVC. | ||||
| 		instances = GetAndExpectIntEnvVar(VCPStressInstances) | ||||
| 		if instances > volumesPerNode*len(nodeList.Items) { | ||||
| 			framework.Failf("Number of instances should be less than or equal to %v, got %v instead", volumesPerNode*len(nodeList.Items), instances) | ||||
| 		} | ||||
| 		if instances < len(scNames) { | ||||
| 			framework.Failf("VCP_STRESS_INSTANCES should be at least %v to utilize all %v types of storage classes, got %v instead", len(scNames), len(scNames), instances) | ||||
| 		} | ||||
|  | ||||
| 		iterations = GetAndExpectIntEnvVar(VCPStressIterations) | ||||
| 		if iterations <= 0 { | ||||
| 			framework.Failf("VCP_STRESS_ITERATIONS should be greater than 0, got instead %v", iterations) | ||||
| 		} | ||||
|  | ||||
| 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName) | ||||
| 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("vsphere stress tests", func(ctx context.Context) { | ||||
| 		scArrays := make([]*storagev1.StorageClass, len(scNames)) | ||||
| 		for index, scname := range scNames { | ||||
| 			// Create vSphere Storage Class | ||||
| 			ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname)) | ||||
| 			var sc *storagev1.StorageClass | ||||
| 			var err error | ||||
| 			switch scname { | ||||
| 			case storageclass1: | ||||
| 				sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) | ||||
| 			case storageclass2: | ||||
| 				scVSanParameters := make(map[string]string) | ||||
| 				scVSanParameters[PolicyHostFailuresToTolerate] = "1" | ||||
| 				sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 			case storageclass3: | ||||
| 				scSPBMPolicyParameters := make(map[string]string) | ||||
| 				scSPBMPolicyParameters[SpbmStoragePolicy] = policyName | ||||
| 				sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 			case storageclass4: | ||||
| 				scWithDSParameters := make(map[string]string) | ||||
| 				scWithDSParameters[Datastore] = datastoreName | ||||
| 				scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") | ||||
| 				sc, err = client.StorageV1().StorageClasses().Create(ctx, scWithDatastoreSpec, metav1.CreateOptions{}) | ||||
| 			} | ||||
| 			gomega.Expect(sc).NotTo(gomega.BeNil()) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), scname, metav1.DeleteOptions{}) | ||||
| 			scArrays[index] = sc | ||||
| 		} | ||||
|  | ||||
| 		var wg sync.WaitGroup | ||||
| 		wg.Add(instances) | ||||
| 		for instanceCount := 0; instanceCount < instances; instanceCount++ { | ||||
| 			instanceID := fmt.Sprintf("Thread:%v", instanceCount+1) | ||||
| 			go PerformVolumeLifeCycleInParallel(ctx, f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg) | ||||
| 		} | ||||
| 		wg.Wait() | ||||
| 	}) | ||||
|  | ||||
| }) | ||||
|  | ||||
| // PerformVolumeLifeCycleInParallel performs volume lifecycle operations | ||||
| // Called as a go routine to perform operations in parallel | ||||
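| // A typical invocation (mirroring how the stress test above calls it) launches one goroutine per instance: | ||||
| // | ||||
| //	var wg sync.WaitGroup | ||||
| //	wg.Add(instances) | ||||
| //	for i := 0; i < instances; i++ { | ||||
| //		go PerformVolumeLifeCycleInParallel(ctx, f, client, namespace, fmt.Sprintf("Thread:%v", i+1), scArrays[i%len(scArrays)], iterations, &wg) | ||||
| //	} | ||||
| //	wg.Wait() | ||||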
| func PerformVolumeLifeCycleInParallel(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) { | ||||
| 	defer wg.Done() | ||||
| 	defer ginkgo.GinkgoRecover() | ||||
|  | ||||
| 	for iterationCount := 0; iterationCount < iterations; iterationCount++ { | ||||
| 		logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceID, iterationCount+1) | ||||
| 		ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) | ||||
| 		pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 		var pvclaims []*v1.PersistentVolumeClaim | ||||
| 		pvclaims = append(pvclaims, pvclaim) | ||||
| 		ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) | ||||
| 		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) | ||||
| 		// Create pod to attach Volume to Node | ||||
| 		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) | ||||
| 		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		// Get the copy of the Pod to know the assigned node name. | ||||
| 		pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) | ||||
| 		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) | ||||
| 		if !isVolumeAttached { | ||||
| 			framework.Failf("Volume: %s is not attached to the node: %v", persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) | ||||
| 		} | ||||
| 		framework.ExpectNoError(verifyDiskAttachedError) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) | ||||
| 		verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) | ||||
| 		err = e2epod.DeletePodWithWait(ctx, client, pod) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) | ||||
| 		err = waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) | ||||
| 		err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	} | ||||
| } | ||||
| @@ -1,848 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| 	"regexp" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	"github.com/vmware/govmomi/find" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/mo" | ||||
| 	vim25types "github.com/vmware/govmomi/vim25/types" | ||||
| 	"k8s.io/klog/v2" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| 	"k8s.io/apimachinery/pkg/api/resource" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/rand" | ||||
| 	"k8s.io/apimachinery/pkg/util/uuid" | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	imageutils "k8s.io/kubernetes/test/utils/image" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	volumesPerNode = 55 | ||||
| 	storageclass1  = "sc-default" | ||||
| 	storageclass2  = "sc-vsan" | ||||
| 	storageclass3  = "sc-spbm" | ||||
| 	storageclass4  = "sc-user-specified-ds" | ||||
| 	dummyDiskName  = "kube-dummyDisk.vmdk" | ||||
| 	providerPrefix = "vsphere://" | ||||
| ) | ||||
|  | ||||
| // volumeState represents the state of a volume. | ||||
| type volumeState int32 | ||||
|  | ||||
| const ( | ||||
| 	volumeStateDetached volumeState = 1 | ||||
| 	volumeStateAttached volumeState = 2 | ||||
| ) | ||||
|  | ||||
| // Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes | ||||
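| // nodeVolumes maps a node name to the volume paths expected to detach, e.g. | ||||
| // {"node-1": {"[vsanDatastore] kubevols/e2e-vol.vmdk"}} (illustrative values). | ||||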
| func waitForVSphereDisksToDetach(ctx context.Context, nodeVolumes map[string][]string) error { | ||||
| 	var ( | ||||
| 		detachTimeout  = 5 * time.Minute | ||||
| 		detachPollTime = 10 * time.Second | ||||
| 	) | ||||
| 	waitErr := wait.PollWithContext(ctx, detachPollTime, detachTimeout, func(ctx context.Context) (bool, error) { | ||||
| 		attachedResult, err := disksAreAttached(ctx, nodeVolumes) | ||||
| 		if err != nil { | ||||
| 			return false, err | ||||
| 		} | ||||
| 		for nodeName, nodeVolumes := range attachedResult { | ||||
| 			for volumePath, attached := range nodeVolumes { | ||||
| 				if attached { | ||||
| 					framework.Logf("Volume %q is still attached to %q.", volumePath, string(nodeName)) | ||||
| 					return false, nil | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		framework.Logf("Volumes are successfully detached from all the nodes: %+v", nodeVolumes) | ||||
| 		return true, nil | ||||
| 	}) | ||||
| 	if waitErr != nil { | ||||
| 		if wait.Interrupted(waitErr) { | ||||
| 			return fmt.Errorf("volumes have not detached after %v: %v", detachTimeout, waitErr) | ||||
| 		} | ||||
| 		return fmt.Errorf("error waiting for volumes to detach: %v", waitErr) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes | ||||
| func waitForVSphereDiskStatus(ctx context.Context, volumePath string, nodeName string, expectedState volumeState) error { | ||||
| 	var ( | ||||
| 		currentState volumeState | ||||
| 		timeout      = 6 * time.Minute | ||||
| 		pollTime     = 10 * time.Second | ||||
| 	) | ||||
|  | ||||
| 	var attachedState = map[bool]volumeState{ | ||||
| 		true:  volumeStateAttached, | ||||
| 		false: volumeStateDetached, | ||||
| 	} | ||||
|  | ||||
| 	var attachedStateMsg = map[volumeState]string{ | ||||
| 		volumeStateAttached: "attached to", | ||||
| 		volumeStateDetached: "detached from", | ||||
| 	} | ||||
|  | ||||
| 	waitErr := wait.PollWithContext(ctx, pollTime, timeout, func(ctx context.Context) (bool, error) { | ||||
| 		diskAttached, err := diskIsAttached(ctx, volumePath, nodeName) | ||||
| 		if err != nil { | ||||
| 			return true, err | ||||
| 		} | ||||
|  | ||||
| 		currentState = attachedState[diskAttached] | ||||
| 		if currentState == expectedState { | ||||
| 			framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName) | ||||
| 			return true, nil | ||||
| 		} | ||||
| 		framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName) | ||||
| 		return false, nil | ||||
| 	}) | ||||
| 	if waitErr != nil { | ||||
| 		if wait.Interrupted(waitErr) { | ||||
| 			return fmt.Errorf("volume %q is not %s %q after %v: %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout, waitErr) | ||||
| 		} | ||||
| 		return fmt.Errorf("error waiting for volume %q to be %s %q: %v", volumePath, attachedStateMsg[expectedState], nodeName, waitErr) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Wait until vsphere vmdk is attached to the given node or time out after 6 minutes | ||||
| func waitForVSphereDiskToAttach(ctx context.Context, volumePath string, nodeName string) error { | ||||
| 	return waitForVSphereDiskStatus(ctx, volumePath, nodeName, volumeStateAttached) | ||||
| } | ||||
|  | ||||
| // Wait until vsphere vmdk is detached from the given node or time out after 6 minutes | ||||
| func waitForVSphereDiskToDetach(ctx context.Context, volumePath string, nodeName string) error { | ||||
| 	return waitForVSphereDiskStatus(ctx, volumePath, nodeName, volumeStateDetached) | ||||
| } | ||||
|  | ||||
| // function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels | ||||
| func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume { | ||||
| 	return e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{ | ||||
| 		NamePrefix: "vspherepv-", | ||||
| 		PVSource: v1.PersistentVolumeSource{ | ||||
| 			VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ | ||||
| 				VolumePath: volumePath, | ||||
| 				FSType:     "ext4", | ||||
| 			}, | ||||
| 		}, | ||||
| 		ReclaimPolicy: persistentVolumeReclaimPolicy, | ||||
| 		Capacity:      "2Gi", | ||||
| 		AccessModes: []v1.PersistentVolumeAccessMode{ | ||||
| 			v1.ReadWriteOnce, | ||||
| 		}, | ||||
| 		Labels: labels, | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| // function to get vsphere persistent volume claim spec with given selector labels. | ||||
| func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim { | ||||
| 	pvc := &v1.PersistentVolumeClaim{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			GenerateName: "pvc-", | ||||
| 			Namespace:    namespace, | ||||
| 		}, | ||||
| 		Spec: v1.PersistentVolumeClaimSpec{ | ||||
| 			AccessModes: []v1.PersistentVolumeAccessMode{ | ||||
| 				v1.ReadWriteOnce, | ||||
| 			}, | ||||
| 			Resources: v1.VolumeResourceRequirements{ | ||||
| 				Requests: v1.ResourceList{ | ||||
| 					v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"), | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	if labels != nil { | ||||
| 		pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels} | ||||
| 	} | ||||
|  | ||||
| 	return pvc | ||||
| } | ||||
|  | ||||
| // function to write content to the volume backed by given PVC | ||||
| func writeContentToVSpherePV(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) { | ||||
| 	utils.RunInPodWithVolume(ctx, client, timeouts, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data") | ||||
| 	framework.Logf("Done with writing content to volume") | ||||
| } | ||||
|  | ||||
| // function to verify content is matching on the volume backed for given PVC | ||||
| func verifyContentOfVSpherePV(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) { | ||||
| 	utils.RunInPodWithVolume(ctx, client, timeouts, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data") | ||||
| 	framework.Logf("Successfully verified content of the volume") | ||||
| } | ||||
|  | ||||
| func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) *storagev1.StorageClass { | ||||
| 	sc := &storagev1.StorageClass{ | ||||
| 		TypeMeta: metav1.TypeMeta{ | ||||
| 			Kind: "StorageClass", | ||||
| 		}, | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			Name: name, | ||||
| 		}, | ||||
| 		Provisioner: "kubernetes.io/vsphere-volume", | ||||
| 	} | ||||
| 	if scParameters != nil { | ||||
| 		sc.Parameters = scParameters | ||||
| 	} | ||||
| 	if zones != nil { | ||||
| 		term := v1.TopologySelectorTerm{ | ||||
| 			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{ | ||||
| 				{ | ||||
| 					Key:    v1.LabelTopologyZone, | ||||
| 					Values: zones, | ||||
| 				}, | ||||
| 			}, | ||||
| 		} | ||||
| 		sc.AllowedTopologies = append(sc.AllowedTopologies, term) | ||||
| 	} | ||||
| 	if volumeBindingMode != "" { | ||||
| 		mode := volumeBindingMode | ||||
| 		sc.VolumeBindingMode = &mode | ||||
| 	} | ||||
| 	return sc | ||||
| } | ||||
|  | ||||
| func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storagev1.StorageClass) *v1.PersistentVolumeClaim { | ||||
| 	claim := &v1.PersistentVolumeClaim{ | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			GenerateName: "pvc-", | ||||
| 			Namespace:    ns, | ||||
| 		}, | ||||
| 		Spec: v1.PersistentVolumeClaimSpec{ | ||||
| 			AccessModes: []v1.PersistentVolumeAccessMode{ | ||||
| 				v1.ReadWriteOnce, | ||||
| 			}, | ||||
| 			Resources: v1.VolumeResourceRequirements{ | ||||
| 				Requests: v1.ResourceList{ | ||||
| 					v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize), | ||||
| 				}, | ||||
| 			}, | ||||
| 			StorageClassName: &(storageclass.Name), | ||||
| 		}, | ||||
| 	} | ||||
| 	return claim | ||||
| } | ||||
|  | ||||
| // func to get pod spec with given volume claim, node selector labels and command | ||||
| func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod { | ||||
| 	pod := &v1.Pod{ | ||||
| 		TypeMeta: metav1.TypeMeta{ | ||||
| 			Kind:       "Pod", | ||||
| 			APIVersion: "v1", | ||||
| 		}, | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			GenerateName: "pod-pvc-", | ||||
| 		}, | ||||
| 		Spec: v1.PodSpec{ | ||||
| 			Containers: []v1.Container{ | ||||
| 				{ | ||||
| 					Name:    "volume-tester", | ||||
| 					Image:   imageutils.GetE2EImage(imageutils.BusyBox), | ||||
| 					Command: []string{"/bin/sh"}, | ||||
| 					Args:    []string{"-c", command}, | ||||
| 					VolumeMounts: []v1.VolumeMount{ | ||||
| 						{ | ||||
| 							Name:      "my-volume", | ||||
| 							MountPath: "/mnt/test", | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 			RestartPolicy: v1.RestartPolicyNever, | ||||
| 			Volumes: []v1.Volume{ | ||||
| 				{ | ||||
| 					Name: "my-volume", | ||||
| 					VolumeSource: v1.VolumeSource{ | ||||
| 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ | ||||
| 							ClaimName: claimName, | ||||
| 							ReadOnly:  false, | ||||
| 						}, | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	if nodeSelectorKV != nil { | ||||
| 		pod.Spec.NodeSelector = nodeSelectorKV | ||||
| 	} | ||||
| 	return pod | ||||
| } | ||||
|  | ||||
| // func to get pod spec with given volume paths, node selector labels and container commands | ||||
| func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod { | ||||
| 	var volumeMounts []v1.VolumeMount | ||||
| 	var volumes []v1.Volume | ||||
|  | ||||
| 	for index, volumePath := range volumePaths { | ||||
| 		name := fmt.Sprintf("volume%v", index+1) | ||||
| 		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name}) | ||||
| 		volumes = append(volumes, v1.Volume{ | ||||
| 			Name: name, | ||||
| 			VolumeSource: v1.VolumeSource{ | ||||
| 				VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ | ||||
| 					VolumePath: volumePath, | ||||
| 					FSType:     "ext4", | ||||
| 				}, | ||||
| 			}, | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	if len(commands) == 0 { | ||||
| 		commands = []string{ | ||||
| 			"/bin/sh", | ||||
| 			"-c", | ||||
| 			"while true; do sleep 2; done", | ||||
| 		} | ||||
| 	} | ||||
| 	pod := &v1.Pod{ | ||||
| 		TypeMeta: metav1.TypeMeta{ | ||||
| 			Kind:       "Pod", | ||||
| 			APIVersion: "v1", | ||||
| 		}, | ||||
| 		ObjectMeta: metav1.ObjectMeta{ | ||||
| 			GenerateName: "vsphere-e2e-", | ||||
| 		}, | ||||
| 		Spec: v1.PodSpec{ | ||||
| 			Containers: []v1.Container{ | ||||
| 				{ | ||||
| 					Name:         "vsphere-e2e-container-" + string(uuid.NewUUID()), | ||||
| 					Image:        imageutils.GetE2EImage(imageutils.BusyBox), | ||||
| 					Command:      commands, | ||||
| 					VolumeMounts: volumeMounts, | ||||
| 				}, | ||||
| 			}, | ||||
| 			RestartPolicy: v1.RestartPolicyNever, | ||||
| 			Volumes:       volumes, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	if keyValuelabel != nil { | ||||
| 		pod.Spec.NodeSelector = keyValuelabel | ||||
| 	} | ||||
| 	return pod | ||||
| } | ||||
|  | ||||
| func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) { | ||||
| 	for _, filePath := range filePaths { | ||||
| 		_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/ls", filePath) | ||||
| 		framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName)) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) { | ||||
| 	for _, filePath := range filePaths { | ||||
| 		err := e2eoutput.CreateEmptyFileOnPod(namespace, podName, filePath) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verify volumes are attached to the node and are accessible in pod | ||||
| func verifyVSphereVolumesAccessible(ctx context.Context, c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) { | ||||
| 	nodeName := pod.Spec.NodeName | ||||
| 	namespace := pod.Namespace | ||||
| 	for index, pv := range persistentvolumes { | ||||
| 		// Verify disks are attached to the node | ||||
| 		isAttached, err := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, nodeName) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if !isAttached { | ||||
| 			framework.Failf("disk %v is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName) | ||||
| 		} | ||||
| 		// Verify Volumes are accessible | ||||
| 		filePath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt") | ||||
| 		_, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filePath}, "", time.Minute) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // verify volumes are created on one of the specified zones | ||||
| func verifyVolumeCreationOnRightZone(ctx context.Context, persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) { | ||||
| 	for _, pv := range persistentvolumes { | ||||
| 		volumePath := pv.Spec.VsphereVolume.VolumePath | ||||
| 		// Extract datastoreName from the volume path in the pv spec | ||||
| 		// For example : "vsanDatastore" is extracted from "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk" | ||||
| 		datastorePathObj, _ := getDatastorePathObjFromVMDiskPath(volumePath) | ||||
| 		datastoreName := datastorePathObj.Datastore | ||||
| 		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) | ||||
| 		ctx, cancel := context.WithCancel(ctx) | ||||
| 		defer cancel() | ||||
| 		// Get the datastore object reference from the datastore name | ||||
| 		datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		// Find common datastores among the specified zones | ||||
| 		var datastoreCountMap = make(map[string]int) | ||||
| 		numZones := len(zones) | ||||
| 		var commonDatastores []string | ||||
| 		for _, zone := range zones { | ||||
| 			datastoreInZone := TestContext.NodeMapper.GetDatastoresInZone(nodeInfo.VSphere.Config.Hostname, zone) | ||||
| 			for _, datastore := range datastoreInZone { | ||||
| 				datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1 | ||||
| 				if datastoreCountMap[datastore] == numZones { | ||||
| 					commonDatastores = append(commonDatastores, datastore) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 		gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Get vSphere Volume Path from PVC | ||||
| func getvSphereVolumePathFromClaim(ctx context.Context, client clientset.Interface, namespace string, claimName string) string { | ||||
| 	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	return pv.Spec.VsphereVolume.VolumePath | ||||
| } | ||||
|  | ||||
| // Get canonical volume path for volume Path. | ||||
| // Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk | ||||
| // Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path. | ||||
| func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) { | ||||
| 	var folderID string | ||||
| 	canonicalVolumePath := volumePath | ||||
| 	dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/") | ||||
| 	if len(dsPath) <= 1 { | ||||
| 		return canonicalVolumePath, nil | ||||
| 	} | ||||
| 	datastore := dsPathObj.Datastore | ||||
| 	dsFolder := dsPath[0] | ||||
| 	// Resolve the datastore folder ID when the folder name in the path is not already a UUID. | ||||
| 	if !isValidUUID(dsFolder) { | ||||
| 		dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + dummyDiskName | ||||
| 		// Query a non-existent dummy disk in the datastore folder; the lookup fails and | ||||
| 		// the error message contains the canonical path, which embeds the folder ID. | ||||
| 		_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath) | ||||
| 		if err != nil { | ||||
| 			re := regexp.MustCompile("File (.*?) was not found") | ||||
| 			if match := re.FindStringSubmatch(err.Error()); len(match) > 1 { | ||||
| 				canonicalVolumePath = match[1] | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	diskPath := getPathFromVMDiskPath(canonicalVolumePath) | ||||
| 	if diskPath == "" { | ||||
| 		return "", fmt.Errorf("failed to parse canonicalVolumePath: %s in getCanonicalVolumePath method", canonicalVolumePath) | ||||
| 	} | ||||
| 	folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0] | ||||
| 	canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1) | ||||
| 	return canonicalVolumePath, nil | ||||
| } | ||||
|  | ||||
| // getPathFromVMDiskPath retrieves the path from VM Disk Path. | ||||
| // Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk | ||||
| func getPathFromVMDiskPath(vmDiskPath string) string { | ||||
| 	datastorePathObj := new(object.DatastorePath) | ||||
| 	isSuccess := datastorePathObj.FromString(vmDiskPath) | ||||
| 	if !isSuccess { | ||||
| 		framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath) | ||||
| 		return "" | ||||
| 	} | ||||
| 	return datastorePathObj.Path | ||||
| } | ||||
|  | ||||
| // getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path. | ||||
| func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) { | ||||
| 	datastorePathObj := new(object.DatastorePath) | ||||
| 	isSuccess := datastorePathObj.FromString(vmDiskPath) | ||||
| 	if !isSuccess { | ||||
| 		framework.Logf("Failed to parse volPath: %s", vmDiskPath) | ||||
| 		return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath) | ||||
| 	} | ||||
| 	return datastorePathObj, nil | ||||
| } | ||||
|  | ||||
| // getVirtualDiskPage83Data gets the virtual disk UUID by diskPath | ||||
| func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) { | ||||
| 	if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" { | ||||
| 		diskPath += ".vmdk" | ||||
| 	} | ||||
| 	vdm := object.NewVirtualDiskManager(dc.Client()) | ||||
| 	// Returns uuid of vmdk virtual disk | ||||
| 	diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc) | ||||
|  | ||||
| 	if err != nil { | ||||
| 		klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	diskUUID = formatVirtualDiskUUID(diskUUID) | ||||
| 	return diskUUID, nil | ||||
| } | ||||
|  | ||||
| // formatVirtualDiskUUID removes any spaces and hyphens in UUID | ||||
| // Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa | ||||
| func formatVirtualDiskUUID(uuid string) string { | ||||
| 	uuidWithNoSpaces := strings.Replace(uuid, " ", "", -1) | ||||
| 	uuidWithNoHyphens := strings.Replace(uuidWithNoSpaces, "-", "", -1) | ||||
| 	return strings.ToLower(uuidWithNoHyphens) | ||||
| } | ||||
|  | ||||
| // isValidUUID checks if the string is a valid UUID. | ||||
| func isValidUUID(uuid string) bool { | ||||
| 	r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$") | ||||
| 	return r.MatchString(uuid) | ||||
| } | ||||
|  | ||||
| // removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath | ||||
| // for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk | ||||
| // for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk | ||||
| func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string { | ||||
| 	datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1] | ||||
| 	if filepath.Base(datastore) != datastore { | ||||
| 		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1) | ||||
| 	} | ||||
| 	return vDiskPath | ||||
| } | ||||
|  | ||||
| // getVirtualDeviceByPath gets the virtual device by path | ||||
| func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) { | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	// filter vm devices to retrieve device for the given vmdk file identified by disk path | ||||
| 	for _, device := range vmDevices { | ||||
| 		if vmDevices.TypeName(device) == "VirtualDisk" { | ||||
| 			virtualDevice := device.GetVirtualDevice() | ||||
| 			if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok { | ||||
| 				if matchVirtualDiskAndVolPath(backing.FileName, diskPath) { | ||||
| 					framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) | ||||
| 					return device, nil | ||||
| 				} | ||||
| 				framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
| func matchVirtualDiskAndVolPath(diskPath, volPath string) bool { | ||||
| 	fileExt := ".vmdk" | ||||
| 	diskPath = strings.TrimSuffix(diskPath, fileExt) | ||||
| 	volPath = strings.TrimSuffix(volPath, fileExt) | ||||
| 	return diskPath == volPath | ||||
| } | ||||
|  | ||||
| // convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts them to canonical paths | ||||
| func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) { | ||||
| 	vmVolumes := make(map[string][]string) | ||||
| 	for nodeName, volPaths := range nodeVolumes { | ||||
| 		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) | ||||
| 		datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef) | ||||
| 		for i, volPath := range volPaths { | ||||
| 			deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath) | ||||
| 			if err != nil { | ||||
| 				framework.Logf("Failed to convert vsphere volume path %s to device path. err: %+v", volPath, err) | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			volPaths[i] = deviceVolPath | ||||
| 		} | ||||
| 		vmVolumes[nodeName] = volPaths | ||||
| 	} | ||||
| 	return vmVolumes, nil | ||||
| } | ||||
|  | ||||
| // convertVolPathToDevicePath takes volPath and returns canonical volume path | ||||
| func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) { | ||||
| 	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath) | ||||
| 	// Get the canonical volume path for volPath. | ||||
| 	canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err) | ||||
| 		return "", err | ||||
| 	} | ||||
| 	// Check if the volume path has the .vmdk extension. If not, add the extension. | ||||
| 	if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" { | ||||
| 		canonicalVolumePath += ".vmdk" | ||||
| 	} | ||||
| 	return canonicalVolumePath, nil | ||||
| } | ||||
|  | ||||
| // get .vmx file path for a virtual machine | ||||
| func getVMXFilePath(ctx context.Context, vmObject *object.VirtualMachine) (vmxPath string) { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	var nodeVM mo.VirtualMachine | ||||
| 	err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil()) | ||||
|  | ||||
| 	vmxPath = nodeVM.Config.Files.VmPathName | ||||
| 	framework.Logf("vmx file path is %s", vmxPath) | ||||
| 	return vmxPath | ||||
| } | ||||
|  | ||||
| // verify ready node count. Try for up to 3 minutes. Return true if the count matches the expected count. | ||||
| func verifyReadyNodeCount(ctx context.Context, client clientset.Interface, expectedNodes int) bool { | ||||
| 	numNodes := 0 | ||||
| 	for i := 0; i < 36; i++ { | ||||
| 		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		numNodes = len(nodeList.Items) | ||||
| 		if numNodes == expectedNodes { | ||||
| 			break | ||||
| 		} | ||||
| 		time.Sleep(5 * time.Second) | ||||
| 	} | ||||
| 	return (numNodes == expectedNodes) | ||||
| } | ||||
|  | ||||
| // poweroff nodeVM and confirm the poweroff state | ||||
| func poweroffNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	framework.Logf("Powering off node VM %s", nodeName) | ||||
|  | ||||
| 	_, err := vm.PowerOff(ctx) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff) | ||||
| 	framework.ExpectNoError(err, "Unable to power off the node") | ||||
| } | ||||
|  | ||||
| // poweron nodeVM and confirm the poweron state | ||||
| func poweronNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	framework.Logf("Powering on node VM %s", nodeName) | ||||
|  | ||||
| 	_, err := vm.PowerOn(ctx) | ||||
| 	framework.ExpectNoError(err, "Unable to power on the node") | ||||
| 	err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn) | ||||
| 	framework.ExpectNoError(err, "Node did not reach the powered-on state") | ||||
| } | ||||
|  | ||||
| // unregister a nodeVM from VC | ||||
| func unregisterNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	poweroffNodeVM(ctx, nodeName, vm) | ||||
|  | ||||
| 	framework.Logf("Unregistering node VM %s", nodeName) | ||||
| 	err := vm.Unregister(ctx) | ||||
| 	framework.ExpectNoError(err, "Unable to unregister the node") | ||||
| } | ||||
|  | ||||
| // register a nodeVM into a VC | ||||
| func registerNodeVM(ctx context.Context, nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath) | ||||
|  | ||||
| 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) | ||||
| 	finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false) | ||||
|  | ||||
| 	vmFolder, err := finder.FolderOrDefault(ctx, workingDir) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	err = registerTask.Wait(ctx) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	vmPath := filepath.Join(workingDir, nodeName) | ||||
| 	vm, err := finder.VirtualMachine(ctx, vmPath) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	poweronNodeVM(ctx, nodeName, vm) | ||||
| } | ||||
|  | ||||
| // disksAreAttached takes a map of node names to volume paths and returns, per node, each volume's attachment state | ||||
| func disksAreAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) { | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	disksAttached := make(map[string]map[string]bool) | ||||
| 	if len(nodeVolumes) == 0 { | ||||
| 		return disksAttached, nil | ||||
| 	} | ||||
| 	// Convert VolPaths into canonical form so that it can be compared with the VM device path. | ||||
| 	vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err) | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	for vm, volumes := range vmVolumes { | ||||
| 		volumeAttachedMap := make(map[string]bool) | ||||
| 		for _, volume := range volumes { | ||||
| 			attached, err := diskIsAttached(ctx, volume, vm) | ||||
| 			if err != nil { | ||||
| 				return nil, err | ||||
| 			} | ||||
| 			volumeAttachedMap[volume] = attached | ||||
| 		} | ||||
| 		disksAttached[vm] = volumeAttachedMap | ||||
| 	} | ||||
| 	return disksAttached, nil | ||||
| } | ||||
|  | ||||
| // diskIsAttached returns whether the disk is attached to the VM using controllers supported by the plugin. | ||||
| func diskIsAttached(ctx context.Context, volPath string, nodeName string) (bool, error) { | ||||
| 	// Create context | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
| 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) | ||||
| 	Connect(ctx, nodeInfo.VSphere) | ||||
| 	vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) | ||||
| 	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath) | ||||
| 	device, err := getVirtualDeviceByPath(ctx, vm, volPath) | ||||
| 	if err != nil { | ||||
| 		framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q", | ||||
| 			volPath, | ||||
| 			nodeName) | ||||
| 		return false, err | ||||
| 	} | ||||
| 	if device == nil { | ||||
| 		return false, nil | ||||
| 	} | ||||
| 	framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName) | ||||
| 	return true, nil | ||||
| } | ||||
|  | ||||
| // getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID | ||||
| // this gives the VM UUID which can be used to find Node VM from vCenter | ||||
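| // For example (illustrative), providerID "vsphere://4237539071f943a3a77056803bcd7baa" yields "4237539071f943a3a77056803bcd7baa". | ||||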
| func getUUIDFromProviderID(providerID string) string { | ||||
| 	return strings.TrimPrefix(providerID, providerPrefix) | ||||
| } | ||||
|  | ||||
| // GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state | ||||
| func GetReadySchedulableNodeInfos(ctx context.Context, c clientset.Interface) []*NodeInfo { | ||||
| 	var nodesInfo []*NodeInfo | ||||
| 	if TestContext.NodeMapper != nil { | ||||
| 		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		for _, node := range nodeList.Items { | ||||
| 			nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name) | ||||
| 			if nodeInfo != nil { | ||||
| 				nodesInfo = append(nodesInfo, nodeInfo) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nodesInfo | ||||
| } | ||||
|  | ||||
| // GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one of the Ready and schedulable Nodes. | ||||
| // If multiple nodes are Ready and schedulable, one of them is selected randomly | ||||
| // and its associated NodeInfo object is returned. | ||||
| func GetReadySchedulableRandomNodeInfo(ctx context.Context, c clientset.Interface) *NodeInfo { | ||||
| 	nodesInfo := GetReadySchedulableNodeInfos(ctx, c) | ||||
| 	gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty()) | ||||
| 	return nodesInfo[rand.Int()%len(nodesInfo)] | ||||
| } | ||||
|  | ||||
| // invokeVCenterServiceControl invokes the given command for the given service | ||||
| // via service-control on the given vCenter host over SSH. | ||||
| func invokeVCenterServiceControl(ctx context.Context, command, service, host string) error { | ||||
| 	sshCmd := fmt.Sprintf("service-control --%s %s", command, service) | ||||
| 	framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host) | ||||
| 	result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider) | ||||
| 	if err != nil || result.Code != 0 { | ||||
| 		e2essh.LogResult(result) | ||||
| 		return fmt.Errorf("couldn't execute command: %s on vCenter host: %w", sshCmd, err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // expectVolumeToBeAttached checks if the given Volume is attached to the given | ||||
| // Node, else fails. | ||||
| func expectVolumeToBeAttached(ctx context.Context, nodeName, volumePath string) { | ||||
| 	isAttached, err := diskIsAttached(ctx, volumePath, nodeName) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	if !isAttached { | ||||
| 		framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // expectVolumesToBeAttached checks if the given Volumes are attached to the | ||||
| // corresponding set of Nodes, else fails. | ||||
| func expectVolumesToBeAttached(ctx context.Context, pods []*v1.Pod, volumePaths []string) { | ||||
| 	for i, pod := range pods { | ||||
| 		nodeName := pod.Spec.NodeName | ||||
| 		volumePath := volumePaths[i] | ||||
| 		ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) | ||||
| 		expectVolumeToBeAttached(ctx, nodeName, volumePath) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // expectFilesToBeAccessible checks if the given files are accessible on the | ||||
| // corresponding set of Nodes, else fails. | ||||
| func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) { | ||||
| 	for i, pod := range pods { | ||||
| 		podName := pod.Name | ||||
| 		filePath := filePaths[i] | ||||
| 		ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName)) | ||||
| 		verifyFilesExistOnVSphereVolume(namespace, podName, filePath) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // writeContentToPodFile writes the given content to the specified file. | ||||
| func writeContentToPodFile(namespace, podName, filePath, content string) error { | ||||
| 	_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, | ||||
| 		"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath)) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // expectFileContentToMatch checks if a given file contains the specified | ||||
| // content, else fails. | ||||
| func expectFileContentToMatch(namespace, podName, filePath, content string) { | ||||
| 	_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, | ||||
| 		"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath)) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName)) | ||||
| } | ||||
|  | ||||
| // expectFileContentsToMatch checks if the given contents match the ones present | ||||
| // in corresponding files on respective Pods, else fails. | ||||
| func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) { | ||||
| 	for i, pod := range pods { | ||||
| 		podName := pod.Name | ||||
| 		filePath := filePaths[i] | ||||
| 		ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName)) | ||||
| 		expectFileContentToMatch(namespace, podName, filePath, contents[i]) | ||||
| 	} | ||||
| } | ||||
| @@ -1,138 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| Tests to verify volume provisioning on a clustered datastore | ||||
| 1. Static provisioning | ||||
| 2. Dynamic provisioning | ||||
| 3. Dynamic provisioning with spbm policy | ||||
|  | ||||
| This test reads the following environment variables (hypothetical example values are sketched below): | ||||
| 1. CLUSTER_DATASTORE, which should be set to a clustered datastore | ||||
| 2. VSPHERE_SPBM_POLICY_DS_CLUSTER, which should be set to a tag-based SPBM policy tagged to a clustered datastore | ||||
| */ | ||||
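| // Illustrative only: the actual values depend entirely on the vSphere environment | ||||
| // under test; for example, something along the lines of: | ||||
| // | ||||
| //	export CLUSTER_DATASTORE=DatastoreCluster/sharedVmfs-0 | ||||
| //	export VSPHERE_SPBM_POLICY_DS_CLUSTER=tag-based-ds-cluster-policy | ||||
|  | ||||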
| var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-provision") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
|  | ||||
| 	var ( | ||||
| 		client           clientset.Interface | ||||
| 		namespace        string | ||||
| 		scParameters     map[string]string | ||||
| 		clusterDatastore string | ||||
| 		nodeInfo         *NodeInfo | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, client) | ||||
| 		scParameters = make(map[string]string) | ||||
| 		clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Steps: | ||||
| 		1. Create volume options with datastore to be a clustered datastore | ||||
| 		2. Create a vsphere volume | ||||
| 		3. Create podspec with volume path. Create a corresponding pod | ||||
| 		4. Verify disk is attached | ||||
| 		5. Delete the pod and wait for the disk to be detached | ||||
| 		6. Delete the volume | ||||
| 	*/ | ||||
|  | ||||
| 	ginkgo.It("verify static provisioning on clustered datastore", func(ctx context.Context) { | ||||
| 		var volumePath string | ||||
|  | ||||
| 		ginkgo.By("creating a test vsphere volume") | ||||
| 		volumeOptions := new(VolumeOptions) | ||||
| 		volumeOptions.CapacityKB = 2097152 | ||||
| 		volumeOptions.Name = "e2e-vmdk-" + namespace | ||||
| 		volumeOptions.Datastore = clusterDatastore | ||||
|  | ||||
| 		volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		defer func() { | ||||
| 			ginkgo.By("Deleting the vsphere volume") | ||||
| 			nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| 		}() | ||||
|  | ||||
| 		podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) | ||||
|  | ||||
| 		ginkgo.By("Creating pod") | ||||
| 		pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.By("Waiting for pod to be ready") | ||||
| 		gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) | ||||
|  | ||||
| 		// get fresh pod info | ||||
| 		pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		nodeName := pod.Spec.NodeName | ||||
|  | ||||
| 		ginkgo.By("Verifying volume is attached") | ||||
| 		expectVolumeToBeAttached(ctx, nodeName, volumePath) | ||||
|  | ||||
| 		ginkgo.By("Deleting pod") | ||||
| 		err = e2epod.DeletePodWithWait(ctx, client, pod) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Waiting for volumes to be detached from the node") | ||||
| 		err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Steps: | ||||
| 		1. Create storage class parameter and specify datastore to be a clustered datastore name | ||||
| 		2. invokeValidPolicyTest - util to do e2e dynamic provision test | ||||
| 	*/ | ||||
| 	ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func(ctx context.Context) { | ||||
| 		scParameters[Datastore] = clusterDatastore | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Steps: | ||||
| 		1. Create storage class parameter and specify storage policy to be a tag based spbm policy | ||||
| 		2. invokeValidPolicyTest - util to do e2e dynamic provision test | ||||
| 	*/ | ||||
| 	ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func(ctx context.Context) { | ||||
| 		policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster) | ||||
| 		scParameters[SpbmStoragePolicy] = policyDatastoreCluster | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,117 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
|  | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	invalidDatastore = "invalidDatastore" | ||||
| 	datastoreSCName  = "datastoresc" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| 	Test to verify that the datastore specified in the storage class is honored during volume creation. | ||||
|  | ||||
| 	Steps | ||||
| 	1. Create StorageClass with invalid datastore. | ||||
| 	2. Create PVC which uses the StorageClass created in step 1. | ||||
| 	3. Expect the PVC to fail. | ||||
| 	4. Verify that the error returned on PVC failure is correct. | ||||
| */ | ||||
|  | ||||
| var _ = utils.SIGDescribe("Volume Provisioning on Datastore", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-datastore") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client                     clientset.Interface | ||||
| 		namespace                  string | ||||
| 		scParameters               map[string]string | ||||
| 		vSphereCSIMigrationEnabled bool | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		scParameters = make(map[string]string) | ||||
| 		_, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		vSphereCSIMigrationEnabled = GetAndExpectBoolEnvVar(VSphereCSIMigrationEnabled) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for invalid datastore") | ||||
| 		scParameters[Datastore] = invalidDatastore | ||||
| 		scParameters[DiskFormat] = ThinDisk | ||||
| 		err := invokeInvalidDatastoreTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		var errorMsg string | ||||
| 		if !vSphereCSIMigrationEnabled { | ||||
| 			errorMsg = `Failed to provision volume with StorageClass \"` + datastoreSCName + `\": Datastore '` + invalidDatastore + `' not found` | ||||
| 		} else { | ||||
| 			errorMsg = `failed to find datastoreURL for datastore name: \"` + invalidDatastore + `\"` | ||||
| 		} | ||||
| 		// The aggregated event messages returned by the helper must contain the | ||||
| 		// expected provisioning error; otherwise fail and surface what was observed. | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func invokeInvalidDatastoreTestNeg(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string) error { | ||||
| 	ginkgo.By("Creating Storage Class With Invalid Datastore") | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	ginkgo.By("Expect claim to fail provisioning volume") | ||||
| 	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) | ||||
| 	framework.ExpectError(err) | ||||
|  | ||||
| 	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Collect all non-Normal event messages; the provisioning failure surfaces here | ||||
| 	// and is returned to the caller for matching against the expected error string. | ||||
| 	var eventErrorMessages string | ||||
| 	for _, event := range eventList.Items { | ||||
| 		if event.Type != v1.EventTypeNormal { | ||||
| 			eventErrorMessages = eventErrorMessages + event.Message + ";" | ||||
| 		} | ||||
| 	} | ||||
| 	return fmt.Errorf("event messages: %+q", eventErrorMessages) | ||||
| } | ||||
| @@ -1,213 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"path/filepath" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/uuid" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| 	Test to verify that the diskformat specified in the storage class is honored during volume creation. | ||||
| 	Valid and supported options are eagerzeroedthick, zeroedthick, and thin. | ||||
|  | ||||
| 	Steps | ||||
| 	1. Create StorageClass with diskformat set to valid type | ||||
| 	2. Create PVC which uses the StorageClass created in step 1. | ||||
| 	3. Wait for PV to be provisioned. | ||||
| 	4. Wait for PVC's status to become Bound | ||||
| 	5. Create pod using PVC on specific node. | ||||
| 	6. Wait for Disk to be attached to the node. | ||||
| 	7. Get node VM's devices and find PV's Volume Disk. | ||||
| 	8. Get Backing Info of the Volume Disk and obtain EagerlyScrub and ThinProvisioned | ||||
| 	9. Based on the value of EagerlyScrub and ThinProvisioned, verify diskformat is correct. | ||||
| 	10. Delete pod and Wait for Volume Disk to be detached from the Node. | ||||
| 	11. Delete PVC, PV and Storage Class | ||||
| */ | ||||
|  | ||||
| var _ = utils.SIGDescribe("Volume Disk Format", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-disk-format") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	const ( | ||||
| 		NodeLabelKey = "vsphere_e2e_label_volume_diskformat" | ||||
| 	) | ||||
| 	var ( | ||||
| 		client            clientset.Interface | ||||
| 		namespace         string | ||||
| 		nodeName          string | ||||
| 		nodeKeyValueLabel map[string]string | ||||
| 		nodeLabelValue    string | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		nodeName = GetReadySchedulableRandomNodeInfo(ctx, client).Name | ||||
| 		nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID()) | ||||
| 		nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue} | ||||
| 		e2enode.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue) | ||||
| 		ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, client, nodeName, NodeLabelKey) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for diskformat: eagerzeroedthick") | ||||
| 		invokeTest(ctx, f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") | ||||
| 	}) | ||||
| 	ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for diskformat: zeroedthick") | ||||
| 		invokeTest(ctx, f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") | ||||
| 	}) | ||||
| 	ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for diskformat: thin") | ||||
| 		invokeTest(ctx, f, client, namespace, nodeName, nodeKeyValueLabel, "thin") | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func invokeTest(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, diskFormat string) { | ||||
|  | ||||
| 	framework.Logf("Invoking Test for DiskFormat: %s", diskFormat) | ||||
| 	scParameters := make(map[string]string) | ||||
| 	scParameters["diskformat"] = diskFormat | ||||
|  | ||||
| 	ginkgo.By("Creating Storage Class With DiskFormat") | ||||
| 	storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil, "") | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, storageClassSpec, metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) | ||||
| 	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvclaimSpec, metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.CoreV1().PersistentVolumeClaims(namespace).Delete), pvclaimSpec.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Get new copy of the claim | ||||
| 	pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Get the bound PV | ||||
| 	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	/* | ||||
| 		The PV must be attached to the node so that, using the govmomi API, we can read the | ||||
| 		disk's backing info and check its EagerlyScrub and ThinProvisioned properties. | ||||
| 	*/ | ||||
| 	ginkgo.By("Creating pod to attach PV to the node") | ||||
| 	// Create pod to attach Volume to Node | ||||
| 	podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") | ||||
| 	pod, err := client.CoreV1().Pods(namespace).Create(ctx, podSpec, metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.By("Waiting for pod to be running") | ||||
| 	gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) | ||||
|  | ||||
| 	isAttached, err := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, nodeName) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	if !isAttached { | ||||
| 		framework.Failf("Volume: %s is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName) | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("Verify Disk Format") | ||||
| 	if !verifyDiskFormat(ctx, client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat) { | ||||
| 		framework.Failf("DiskFormat Verification Failed. Node: %s, VolumePath: %s, Expected Format: %s", nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat) | ||||
| 	} | ||||
|  | ||||
| 	var volumePaths []string | ||||
| 	volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath) | ||||
|  | ||||
| 	ginkgo.By("Delete pod and wait for volume to be detached from node") | ||||
| 	deletePodAndWaitForVolumeToDetach(ctx, f, client, pod, nodeName, volumePaths) | ||||
|  | ||||
| } | ||||
|  | ||||
| func verifyDiskFormat(ctx context.Context, client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { | ||||
| 	ginkgo.By("Verifying disk format") | ||||
| 	eagerlyScrub := false | ||||
| 	thinProvisioned := false | ||||
| 	diskFound := false | ||||
| 	// Note: filepath.Base already keeps the ".vmdk" extension, so appending Ext again | ||||
| 	// doubles the suffix; the same transformation is applied to the backing file name | ||||
| 	// below, so the comparison still matches the right disk. | ||||
| 	pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath) | ||||
|  | ||||
| 	ctx, cancel := context.WithCancel(ctx) | ||||
| 	defer cancel() | ||||
|  | ||||
| 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) | ||||
| 	vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) | ||||
| 	vmDevices, err := vm.Device(ctx) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	disks := vmDevices.SelectByType((*types.VirtualDisk)(nil)) | ||||
|  | ||||
| 	for _, disk := range disks { | ||||
| 		backing := disk.GetVirtualDevice().Backing.(*types.VirtualDiskFlatVer2BackingInfo) | ||||
| 		backingFileName := filepath.Base(backing.FileName) + filepath.Ext(backing.FileName) | ||||
| 		if backingFileName == pvvmdkfileName { | ||||
| 			diskFound = true | ||||
| 			if backing.EagerlyScrub != nil { | ||||
| 				eagerlyScrub = *backing.EagerlyScrub | ||||
| 			} | ||||
| 			if backing.ThinProvisioned != nil { | ||||
| 				thinProvisioned = *backing.ThinProvisioned | ||||
| 			} | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if !diskFound { | ||||
| 		framework.Failf("Failed to find disk: %s", pvVolumePath) | ||||
| 	} | ||||
| 	isDiskFormatCorrect := false | ||||
| 	if diskFormat == "eagerzeroedthick" { | ||||
| 		if eagerlyScrub && !thinProvisioned { | ||||
| 			isDiskFormatCorrect = true | ||||
| 		} | ||||
| 	} else if diskFormat == "zeroedthick" { | ||||
| 		if !eagerlyScrub && !thinProvisioned { | ||||
| 			isDiskFormatCorrect = true | ||||
| 		} | ||||
| 	} else if diskFormat == "thin" { | ||||
| 		if !eagerlyScrub && thinProvisioned { | ||||
| 			isDiskFormatCorrect = true | ||||
| 		} | ||||
| 	} | ||||
| 	return isDiskFormatCorrect | ||||
| } | ||||
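|  | ||||
| // For reference, the check above reduces to the following mapping between the | ||||
| // storage-class diskformat value and the expected flags on the disk's backing | ||||
| // (an informational summary of the logic, not additional test code): | ||||
| // | ||||
| //	diskformat        EagerlyScrub  ThinProvisioned | ||||
| //	eagerzeroedthick  true          false | ||||
| //	zeroedthick       false         false | ||||
| //	thin              false         true | ||||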
| @@ -1,103 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	"k8s.io/apimachinery/pkg/api/resource" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	diskSizeSCName = "disksizesc" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| 	Test to verify that the disk size specified in the PVC is rounded up correctly. | ||||
|  | ||||
| 	Steps | ||||
| 	1. Create StorageClass. | ||||
| 	2. Create a PVC whose requested size ("1", i.e. one byte) is below the provisioning granularity, using the StorageClass created in step 1. | ||||
| 	3. Verify the provisioned PV size is correct. | ||||
| */ | ||||
|  | ||||
| var _ = utils.SIGDescribe("Volume Disk Size", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-disksize") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client       clientset.Interface | ||||
| 		namespace    string | ||||
| 		scParameters map[string]string | ||||
| 		datastore    string | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func() { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		scParameters = make(map[string]string) | ||||
| 		datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify dynamically provisioned pv has size rounded up correctly", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test disk size") | ||||
| 		scParameters[Datastore] = datastore | ||||
| 		scParameters[DiskFormat] = ThinDisk | ||||
| 		diskSize := "1"           // request the unitless quantity "1", i.e. one byte | ||||
| 		expectedDiskSize := "1Mi" // the provisioner is expected to round up to 1Mi | ||||
|  | ||||
| 		ginkgo.By("Creating Storage Class") | ||||
| 		storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 		ginkgo.By("Creating PVC using the Storage Class") | ||||
| 		pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 		ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 		err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Getting new copy of PVC") | ||||
| 		pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Getting PV created") | ||||
| 		pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Verifying if provisioned PV has the correct size") | ||||
| 		expectedCapacity := resource.MustParse(expectedDiskSize) | ||||
| 		pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] | ||||
| 		gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value())) | ||||
| 	}) | ||||
| }) | ||||
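|  | ||||
| // Informational sketch of the capacity comparison performed above (values shown | ||||
| // for illustration only): | ||||
| // | ||||
| //	resource.MustParse("1").Value()   // 1 (requested by the PVC) | ||||
| //	resource.MustParse("1Mi").Value() // 1048576 (expected capacity of the provisioned PV) | ||||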
| @@ -1,201 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	ext4FSType    = "ext4" | ||||
| 	ext3FSType    = "ext3" | ||||
| 	invalidFSType = "ext10" | ||||
| 	execCommand   = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done" | ||||
| ) | ||||
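|  | ||||
| // execCommand above records the filesystem type of the mounted volume: `df -T` | ||||
| // prints the mount's device and fstype, awk extracts the fstype column from the | ||||
| // second output line, the result is written to /mnt/volume1/fstype (read back | ||||
| // later with cat), and the trailing loop keeps the container running. | ||||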
|  | ||||
| /* | ||||
| 	Test to verify that the fstype specified in the storage class is honored after volume creation. | ||||
|  | ||||
| 	Steps | ||||
| 	1. Create StorageClass with fstype set to valid type (default case included). | ||||
| 	2. Create PVC which uses the StorageClass created in step 1. | ||||
| 	3. Wait for PV to be provisioned. | ||||
| 	4. Wait for PVC's status to become Bound. | ||||
| 	5. Create pod using PVC on specific node. | ||||
| 	6. Wait for Disk to be attached to the node. | ||||
| 	7. Execute command in the pod to get fstype. | ||||
| 	8. Delete pod and Wait for Volume Disk to be detached from the Node. | ||||
| 	9. Delete PVC, PV and Storage Class. | ||||
|  | ||||
| 	Test to verify that an invalid fstype specified in the storage class fails pod creation. | ||||
|  | ||||
| 	Steps | ||||
| 	1. Create StorageClass with an invalid fstype. | ||||
| 	2. Create PVC which uses the StorageClass created in step 1. | ||||
| 	3. Wait for PV to be provisioned. | ||||
| 	4. Wait for PVC's status to become Bound. | ||||
| 	5. Create pod using PVC. | ||||
| 	6. Verify that the pod creation fails. | ||||
| 	7. Verify that MountVolume.MountDevice fails because the filesystem executable for the invalid fstype cannot be found on the node. | ||||
| */ | ||||
|  | ||||
| var _ = utils.SIGDescribe("Volume FStype", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-fstype") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client clientset.Interface | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		gomega.Expect(GetReadySchedulableNodeInfos(ctx, client)).NotTo(gomega.BeEmpty()) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify fstype - ext3 formatted volume", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for fstype: ext3") | ||||
| 		invokeTestForFstype(ctx, f, ext3FSType, ext3FSType) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify fstype - default value should be ext4", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for fstype: Default Value - ext4") | ||||
| 		invokeTestForFstype(ctx, f, "", ext4FSType) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify invalid fstype", func(ctx context.Context) { | ||||
| 		ginkgo.By("Invoking Test for fstype: invalid Value") | ||||
| 		invokeTestForInvalidFstype(ctx, f, client, invalidFSType) | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func invokeTestForFstype(ctx context.Context, f *framework.Framework, fstype string, expectedContent string) { | ||||
| 	framework.Logf("Invoking Test for fstype: %s", fstype) | ||||
| 	namespace := f.Namespace.Name | ||||
| 	scParameters := make(map[string]string) | ||||
| 	scParameters["fstype"] = fstype | ||||
|  | ||||
| 	// Create Persistent Volume | ||||
| 	ginkgo.By("Creating Storage Class With Fstype") | ||||
| 	pvclaim, persistentvolumes := createVolume(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name, scParameters) | ||||
|  | ||||
| 	// Create Pod and verify the persistent volume is accessible | ||||
| 	pod := createPodAndVerifyVolumeAccessible(ctx, f, pvclaim, persistentvolumes) | ||||
| 	_, err := e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Detach and delete volume | ||||
| 	detachVolume(ctx, f, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) | ||||
| 	err = e2epv.DeletePersistentVolumeClaim(ctx, f.ClientSet, pvclaim.Name, namespace) | ||||
| 	framework.ExpectNoError(err) | ||||
| } | ||||
|  | ||||
| func invokeTestForInvalidFstype(ctx context.Context, f *framework.Framework, client clientset.Interface, fstype string) { | ||||
| 	namespace := f.Namespace.Name | ||||
| 	scParameters := make(map[string]string) | ||||
| 	scParameters["fstype"] = fstype | ||||
|  | ||||
| 	// Create Persistent Volume | ||||
| 	ginkgo.By("Creating Storage Class With Invalid Fstype") | ||||
| 	pvclaim, persistentvolumes := createVolume(ctx, client, f.Timeouts, namespace, scParameters) | ||||
|  | ||||
| 	ginkgo.By("Creating pod to attach PV to the node") | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
| 	// Create pod to attach Volume to Node | ||||
| 	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, execCommand) | ||||
| 	framework.ExpectError(err) | ||||
|  | ||||
| 	eventList, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Detach and delete volume | ||||
| 	detachVolume(ctx, f, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) | ||||
| 	err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) | ||||
| 	errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found` | ||||
| 	isFound := false | ||||
| 	for _, item := range eventList.Items { | ||||
| 		if strings.Contains(item.Message, errorMsg) { | ||||
| 			isFound = true | ||||
| 		} | ||||
| 	} | ||||
| 	if !isFound { | ||||
| 		framework.Failf("Unable to verify MountVolume.MountDevice failure for volume %s", persistentvolumes[0].Name) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func createVolume(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	return pvclaim, persistentvolumes | ||||
| } | ||||
|  | ||||
| func createPodAndVerifyVolumeAccessible(ctx context.Context, f *framework.Framework, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod { | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
| 	ginkgo.By("Creating pod to attach PV to the node") | ||||
| 	// Create pod to attach Volume to Node | ||||
| 	pod, err := e2epod.CreatePod(ctx, f.ClientSet, f.Namespace.Name, nil, pvclaims, f.NamespacePodSecurityLevel, execCommand) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Asserts: Right disk is attached to the pod | ||||
| 	ginkgo.By("Verify the volume is accessible and available in the pod") | ||||
| 	verifyVSphereVolumesAccessible(ctx, f.ClientSet, pod, persistentvolumes) | ||||
| 	return pod | ||||
| } | ||||
|  | ||||
| // detachVolume deletes the given pod and waits until the volume is detached from the node it was running on. | ||||
| func detachVolume(ctx context.Context, f *framework.Framework, pod *v1.Pod, volPath string) { | ||||
| 	pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	nodeName := pod.Spec.NodeName | ||||
| 	ginkgo.By("Deleting pod") | ||||
| 	err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.By("Waiting for volumes to be detached from the node") | ||||
| 	framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volPath, nodeName)) | ||||
| } | ||||
| @@ -1,202 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/uuid" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/pkg/cluster/ports" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| // waitForKubeletUp waits for the kubelet on the given host to be up. | ||||
| func waitForKubeletUp(ctx context.Context, host string) error { | ||||
| 	cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz" | ||||
| 	for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) { | ||||
| 		result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) | ||||
| 		if err != nil || result.Code != 0 { | ||||
| 			e2essh.LogResult(result) | ||||
| 		} | ||||
| 		if result.Stdout == "ok" { | ||||
| 			return nil | ||||
| 		} | ||||
| 	} | ||||
| 	return fmt.Errorf("waiting for kubelet timed out") | ||||
| } | ||||
|  | ||||
| // restartKubelet restarts kubelet on the given host. | ||||
| func restartKubelet(ctx context.Context, host string) error { | ||||
| 	var cmd string | ||||
|  | ||||
| 	var sudoPresent bool | ||||
| 	sshResult, err := e2essh.SSH(ctx, "sudo --version", host, framework.TestContext.Provider) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("Unable to ssh to host %s with error %v", host, err) | ||||
| 	} | ||||
| 	if !strings.Contains(sshResult.Stderr, "command not found") { | ||||
| 		sudoPresent = true | ||||
| 	} | ||||
| 	sshResult, err = e2essh.SSH(ctx, "systemctl --version", host, framework.TestContext.Provider) | ||||
| 	if err != nil { | ||||
| 		return fmt.Errorf("Failed to execute command 'systemctl' on host %s with error %v", host, err) | ||||
| 	} | ||||
| 	if !strings.Contains(sshResult.Stderr, "command not found") { | ||||
| 		cmd = "systemctl restart kubelet" | ||||
| 	} else { | ||||
| 		cmd = "service kubelet restart" | ||||
| 	} | ||||
| 	if sudoPresent { | ||||
| 		cmd = fmt.Sprintf("sudo %s", cmd) | ||||
| 	} | ||||
|  | ||||
| 	framework.Logf("Restarting kubelet via ssh on host %s with command %s", host, cmd) | ||||
| 	result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) | ||||
| 	if err != nil || result.Code != 0 { | ||||
| 		e2essh.LogResult(result) | ||||
| 		return fmt.Errorf("couldn't restart kubelet: %w", err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
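|  | ||||
| // restartKubelet above picks the restart mechanism based on what the host offers: | ||||
| // "systemctl restart kubelet" when systemd is available, "service kubelet restart" | ||||
| // otherwise, prefixed with sudo when present. A typical resulting command | ||||
| // (illustrative only) is: | ||||
| // | ||||
| //	sudo systemctl restart kubelet | ||||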
|  | ||||
| /* | ||||
| Test to verify a volume remains attached after a kubelet restart on the master node. | ||||
| For each schedulable node: | ||||
| 1. Create a volume with default volume options | ||||
| 2. Create a Pod | ||||
| 3. Verify the volume is attached | ||||
| 4. Restart the kubelet on the master node | ||||
| 5. Verify again that the volume is attached | ||||
| 6. Delete the pod and wait for the volume to be detached | ||||
| 7. Delete the volume | ||||
| */ | ||||
| var _ = utils.SIGDescribe("Volume Attach Verify", feature.Vsphere, framework.WithSerial(), framework.WithDisruptive(), func() { | ||||
| 	f := framework.NewDefaultFramework("restart-master") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
|  | ||||
| 	const labelKey = "vsphere_e2e_label" | ||||
| 	var ( | ||||
| 		client                clientset.Interface | ||||
| 		namespace             string | ||||
| 		volumePaths           []string | ||||
| 		pods                  []*v1.Pod | ||||
| 		numNodes              int | ||||
| 		nodeKeyValueLabelList []map[string]string | ||||
| 		nodeNameList          []string | ||||
| 		nodeInfo              *NodeInfo | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) | ||||
|  | ||||
| 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		numNodes = len(nodes.Items) | ||||
| 		if numNodes < 2 { | ||||
| 			e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) | ||||
| 		} | ||||
| 		nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name) | ||||
| 		for i := 0; i < numNodes; i++ { | ||||
| 			nodeName := nodes.Items[i].Name | ||||
| 			nodeNameList = append(nodeNameList, nodeName) | ||||
| 			nodeLabelValue := "vsphere_e2e_" + string(uuid.NewUUID()) | ||||
| 			nodeKeyValueLabel := make(map[string]string) | ||||
| 			nodeKeyValueLabel[labelKey] = nodeLabelValue | ||||
| 			nodeKeyValueLabelList = append(nodeKeyValueLabelList, nodeKeyValueLabel) | ||||
| 			e2enode.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabelValue) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify volume remains attached after master kubelet restart", func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessSSHKeyPresent() | ||||
|  | ||||
| 		// Create pod on each node | ||||
| 		for i := 0; i < numNodes; i++ { | ||||
| 			ginkgo.By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) | ||||
| 			volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			volumePaths = append(volumePaths, volumePath) | ||||
|  | ||||
| 			ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) | ||||
| 			podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) | ||||
| 			pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) | ||||
|  | ||||
| 			ginkgo.By("Waiting for pod to be ready") | ||||
| 			gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) | ||||
|  | ||||
| 			pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			pods = append(pods, pod) | ||||
|  | ||||
| 			nodeName := pod.Spec.NodeName | ||||
| 			ginkgo.By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) | ||||
| 			expectVolumeToBeAttached(ctx, nodeName, volumePath) | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.By("Restarting kubelet on the master node") | ||||
| 		masterAddress := framework.APIAddress() + ":22" | ||||
| 		err := restartKubelet(ctx, masterAddress) | ||||
| 		framework.ExpectNoError(err, "Unable to restart kubelet on the master node") | ||||
|  | ||||
| 		ginkgo.By("Verifying the kubelet on the master node is up") | ||||
| 		err = waitForKubeletUp(ctx, masterAddress) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		for i, pod := range pods { | ||||
| 			volumePath := volumePaths[i] | ||||
| 			nodeName := pod.Spec.NodeName | ||||
|  | ||||
| 			ginkgo.By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) | ||||
| 			expectVolumeToBeAttached(ctx, nodeName, volumePath) | ||||
|  | ||||
| 			ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) | ||||
| 			err = e2epod.DeletePodWithWait(ctx, client, pod) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) | ||||
| 			err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName) | ||||
| 			framework.ExpectNoError(err) | ||||
|  | ||||
| 			ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) | ||||
| 			err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| 			framework.ExpectNoError(err) | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,124 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
|  | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| var _ = utils.SIGDescribe("Node Unregister", feature.Vsphere, framework.WithSlow(), framework.WithDisruptive(), func() { | ||||
| 	f := framework.NewDefaultFramework("node-unregister") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client     clientset.Interface | ||||
| 		namespace  string | ||||
| 		workingDir string | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) | ||||
| 		workingDir = GetAndExpectStringEnvVar("VSPHERE_WORKING_DIR") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("node unregister", func(ctx context.Context) { | ||||
| 		ginkgo.By("Get total Ready nodes") | ||||
| 		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if len(nodeList.Items) < 2 { | ||||
| 			framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items)) | ||||
| 		} | ||||
|  | ||||
| 		totalNodesCount := len(nodeList.Items) | ||||
| 		nodeVM := nodeList.Items[0] | ||||
|  | ||||
| 		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeVM.ObjectMeta.Name) | ||||
| 		vmObject := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) | ||||
|  | ||||
| 		// Find VM .vmx file path, host, resource pool. | ||||
| 		// They are required to register a node VM to VC | ||||
| 		vmxFilePath := getVMXFilePath(ctx, vmObject) | ||||
|  | ||||
| 		vmHost, err := vmObject.HostSystem(ctx) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		vmPool, err := vmObject.ResourcePool(ctx) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		// Unregister Node VM | ||||
| 		ginkgo.By("Unregister a node VM") | ||||
| 		unregisterNodeVM(ctx, nodeVM.ObjectMeta.Name, vmObject) | ||||
|  | ||||
| 		// Ready nodes should be 1 less | ||||
| 		ginkgo.By("Verifying the ready node counts") | ||||
| 		if !verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount-1) { | ||||
| 			framework.Failf("Unable to verify expected ready node count. Total Nodes: %d, Expected Ready Nodes: %d", totalNodesCount, totalNodesCount-1) | ||||
| 		} | ||||
|  | ||||
| 		nodeList, err = e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		var nodeNameList []string | ||||
| 		for _, node := range nodeList.Items { | ||||
| 			nodeNameList = append(nodeNameList, node.ObjectMeta.Name) | ||||
| 		} | ||||
| 		gomega.Expect(nodeNameList).NotTo(gomega.ContainElement(nodeVM.ObjectMeta.Name)) | ||||
|  | ||||
| 		// Register Node VM | ||||
| 		ginkgo.By("Register back the node VM") | ||||
| 		registerNodeVM(ctx, nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost) | ||||
|  | ||||
| 		// Ready nodes should be equal to earlier count | ||||
| 		ginkgo.By("Verifying the ready node counts") | ||||
| 		if !verifyReadyNodeCount(ctx, f.ClientSet, totalNodesCount) { | ||||
| 			framework.Failf("Unable to verify expected ready node count. Total Nodes: %d, Expected Ready Nodes: %d", totalNodesCount, totalNodesCount) | ||||
| 		} | ||||
|  | ||||
| 		nodeList, err = e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		nodeNameList = nodeNameList[:0] | ||||
| 		for _, node := range nodeList.Items { | ||||
| 			nodeNameList = append(nodeNameList, node.ObjectMeta.Name) | ||||
| 		} | ||||
| 		gomega.Expect(nodeNameList).To(gomega.ContainElement(nodeVM.ObjectMeta.Name)) | ||||
|  | ||||
| 		// Sanity test that pod provisioning works | ||||
| 		ginkgo.By("Sanity check for volume lifecycle") | ||||
| 		scParameters := make(map[string]string) | ||||
| 		storagePolicy := GetAndExpectStringEnvVar("VSPHERE_SPBM_GOLD_POLICY") | ||||
| 		scParameters[SpbmStoragePolicy] = storagePolicy | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,192 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	"github.com/vmware/govmomi/object" | ||||
| 	vimtypes "github.com/vmware/govmomi/vim25/types" | ||||
|  | ||||
| 	appsv1 "k8s.io/api/apps/v1" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/wait" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| Test to verify volume status after a node is powered off: | ||||
| 1. Verify the pod is rescheduled to a different node with the volume attached to it | ||||
| 2. Verify the volume is detached from the powered-off node | ||||
| */ | ||||
| var _ = utils.SIGDescribe("Node Poweroff", feature.Vsphere, framework.WithSlow(), framework.WithDisruptive(), func() { | ||||
| 	f := framework.NewDefaultFramework("node-poweroff") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client    clientset.Interface | ||||
| 		namespace string | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) | ||||
| 		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if len(nodeList.Items) < 2 { | ||||
| 			framework.Failf("At least 2 nodes are required for this test, got instead: %v", len(nodeList.Items)) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Steps: | ||||
| 		1. Create a StorageClass | ||||
| 		2. Create a PVC with the StorageClass | ||||
| 		3. Create a Deployment with 1 replica, using the PVC | ||||
| 		4. Verify the pod got provisioned on a node | ||||
| 		5. Verify the volume is attached to the node | ||||
| 		6. Power off the node where pod got provisioned | ||||
| 		7. Verify the pod got provisioned on a different node | ||||
| 		8. Verify the volume is attached to the new node | ||||
| 		9. Verify the volume is detached from the old node | ||||
| 		10. Delete the Deployment and wait for the volume to be detached | ||||
| 		11. Delete the PVC | ||||
| 		12. Delete the StorageClass | ||||
| 	*/ | ||||
| 	ginkgo.It("verify volume status after node power off", func(ctx context.Context) { | ||||
| 		ginkgo.By("Creating a Storage Class") | ||||
| 		storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "") | ||||
| 		storageclass, err := client.StorageV1().StorageClasses().Create(ctx, storageClassSpec, metav1.CreateOptions{}) | ||||
| 		framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 		ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 		ginkgo.By("Creating PVC using the Storage Class") | ||||
| 		pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) | ||||
| 		pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, pvclaimSpec) | ||||
| 		framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err)) | ||||
| 		ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 		ginkgo.By("Waiting for PVC to be in bound phase") | ||||
| 		pvclaims := []*v1.PersistentVolumeClaim{pvclaim} | ||||
| 		pvs, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 		framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) | ||||
| 		volumePath := pvs[0].Spec.VsphereVolume.VolumePath | ||||
|  | ||||
| 		ginkgo.By("Creating a Deployment") | ||||
| 		deployment, err := e2edeployment.CreateDeployment(ctx, client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, admissionapi.LevelRestricted, "") | ||||
| 		framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) | ||||
| 		ginkgo.DeferCleanup(framework.IgnoreNotFound(client.AppsV1().Deployments(namespace).Delete), deployment.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 		ginkgo.By("Get pod from the deployment") | ||||
| 		podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment) | ||||
| 		framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployment with err: %v", err)) | ||||
| 		gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) | ||||
| 		pod := podList.Items[0] | ||||
| 		node1 := pod.Spec.NodeName | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) | ||||
| 		isAttached, err := diskIsAttached(ctx, volumePath, node1) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if !isAttached { | ||||
| 			framework.Failf("Volume: %s is not attached to the node: %v", volumePath, node1) | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Power off the node: %v", node1)) | ||||
|  | ||||
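| 		// Power off the node's VM through the govmomi API; the kubelet on it becomes unreachable, | ||||
| 		// so the Deployment's pod is expected to fail over to another node. | ||||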
| 		nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1) | ||||
| 		vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) | ||||
| 		_, err = vm.PowerOff(ctx) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		ginkgo.DeferCleanup(vm.PowerOn) | ||||
|  | ||||
| 		err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOff) | ||||
| 		framework.ExpectNoError(err, "Unable to power off the node") | ||||
|  | ||||
| 		// Wait for the pod to fail over to a different node | ||||
| 		node2, err := waitForPodToFailover(ctx, client, deployment, node1) | ||||
| 		framework.ExpectNoError(err, "Pod did not fail over to a different node") | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) | ||||
| 		err = waitForVSphereDiskToAttach(ctx, volumePath, node2) | ||||
| 		framework.ExpectNoError(err, "Disk is not attached to the node") | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) | ||||
| 		err = waitForVSphereDiskToDetach(ctx, volumePath, node1) | ||||
| 		framework.ExpectNoError(err, "Disk is not detached from the node") | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Power on the previous node: %v", node1)) | ||||
| 		_, err = vm.PowerOn(ctx) | ||||
| 		framework.ExpectNoError(err, "Failed to power on the node") | ||||
| 		err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn) | ||||
| 		framework.ExpectNoError(err, "Unable to power on the node") | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| // waitForPodToFailover waits until the Deployment's pod fails over to a different node, or times out after 3 minutes | ||||
| func waitForPodToFailover(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment, oldNode string) (string, error) { | ||||
| 	var ( | ||||
| 		timeout  = 3 * time.Minute | ||||
| 		pollTime = 10 * time.Second | ||||
| 	) | ||||
|  | ||||
| 	waitErr := wait.PollWithContext(ctx, pollTime, timeout, func(ctx context.Context) (bool, error) { | ||||
| 		currentNode, err := getNodeForDeployment(ctx, client, deployment) | ||||
| 		if err != nil { | ||||
| 			return true, err | ||||
| 		} | ||||
|  | ||||
| 		if currentNode != oldNode { | ||||
| 			framework.Logf("The pod has been failed over from %q to %q", oldNode, currentNode) | ||||
| 			return true, nil | ||||
| 		} | ||||
|  | ||||
| 		framework.Logf("Waiting for pod to be failed over from %q", oldNode) | ||||
| 		return false, nil | ||||
| 	}) | ||||
|  | ||||
| 	if waitErr != nil { | ||||
| 		if wait.Interrupted(waitErr) { | ||||
| 			return "", fmt.Errorf("pod has not failed over after %v: %v", timeout, waitErr) | ||||
| 		} | ||||
| 		return "", fmt.Errorf("pod did not fail over from %q: %v", oldNode, waitErr) | ||||
| 	} | ||||
|  | ||||
| 	return getNodeForDeployment(ctx, client, deployment) | ||||
| } | ||||
|  | ||||
| // getNodeForDeployment returns the name of the node that the Deployment's pod is running on | ||||
| func getNodeForDeployment(ctx context.Context, client clientset.Interface, deployment *appsv1.Deployment) (string, error) { | ||||
| 	podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	if len(podList.Items) == 0 { | ||||
| 		return "", fmt.Errorf("no pods found for deployment %q", deployment.Name) | ||||
| 	} | ||||
| 	return podList.Items[0].Spec.NodeName, nil | ||||
| } | ||||
| @@ -1,128 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"strconv" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| 	Test to perform Disk Ops storm. | ||||
|  | ||||
| 	Steps | ||||
|     	1. Create a storage class for thin provisioning. | ||||
|     	2. Create 30 PVCs (or VOLUME_OPS_SCALE, if set) using the above storage class, each requesting a 2Gi volume. | ||||
|     	3. Wait until all disks are ready and all PVs and PVCs are bound. (CreateVolume storm) | ||||
|     	4. Create a pod to mount the volumes using the PVCs created in step 2. (AttachDisk storm) | ||||
|     	5. Wait for the pod status to be running. | ||||
|     	6. Verify all volumes are accessible and available in the pod. | ||||
|     	7. Delete the pod. | ||||
|     	8. Wait until the volumes get detached. (DetachDisk storm) | ||||
|     	9. Delete all PVCs. This should delete all disks. (DeleteVolume storm) | ||||
|     	10. Delete the storage class. | ||||
| */ | ||||
|  | ||||
| var _ = utils.SIGDescribe("Volume Operations Storm", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-ops-storm") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	const defaultVolumeOpsScale = 30 | ||||
| 	var ( | ||||
| 		client            clientset.Interface | ||||
| 		namespace         string | ||||
| 		storageclass      *storagev1.StorageClass | ||||
| 		pvclaims          []*v1.PersistentVolumeClaim | ||||
| 		persistentvolumes []*v1.PersistentVolume | ||||
| 		err               error | ||||
| 		volumeOpsScale    int | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		gomega.Expect(GetReadySchedulableNodeInfos(ctx, client)).NotTo(gomega.BeEmpty()) | ||||
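| 		// VOLUME_OPS_SCALE overrides the default number of volumes (30) exercised by this storm test. | ||||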
| 		if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" { | ||||
| 			volumeOpsScale, err = strconv.Atoi(scale) | ||||
| 			framework.ExpectNoError(err) | ||||
| 		} else { | ||||
| 			volumeOpsScale = defaultVolumeOpsScale | ||||
| 		} | ||||
| 		pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale) | ||||
| 	}) | ||||
| 	ginkgo.AfterEach(func(ctx context.Context) { | ||||
| 		ginkgo.By("Deleting PVCs") | ||||
| 		for _, claim := range pvclaims { | ||||
| 			_ = e2epv.DeletePersistentVolumeClaim(ctx, client, claim.Name, namespace) | ||||
| 		} | ||||
| 		ginkgo.By("Deleting StorageClass") | ||||
| 		err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, metav1.DeleteOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("should create pod with many volumes and verify no attach call fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volumeOpsScale)) | ||||
| 		ginkgo.By("Creating Storage Class") | ||||
| 		scParameters := make(map[string]string) | ||||
| 		scParameters["diskformat"] = "thin" | ||||
| 		storageclass, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("thinsc", scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Creating PVCs using the Storage Class") | ||||
| 		count := 0 | ||||
| 		for count < volumeOpsScale { | ||||
| 			pvclaims[count], err = e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			count++ | ||||
| 		} | ||||
|  | ||||
| 		ginkgo.By("Waiting for all claims to be in bound phase") | ||||
| 		persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Creating pod to attach PVs to the node") | ||||
| 		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 		framework.ExpectNoError(err) | ||||
|  | ||||
| 		ginkgo.By("Verify all volumes are accessible and available in the pod") | ||||
| 		verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) | ||||
|  | ||||
| 		ginkgo.By("Deleting pod") | ||||
| 		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod)) | ||||
|  | ||||
| 		ginkgo.By("Waiting for volumes to be detached from the node") | ||||
| 		for _, pv := range persistentvolumes { | ||||
| 			framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,244 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| 	This test calculates latency numbers for volume lifecycle operations: | ||||
|  | ||||
| 1. Create 4 types of storage classes | ||||
| 2. Read the total number of volumes to be created and the number of volumes per pod | ||||
| 3. Create the total number of PVCs | ||||
| 4. Create pods, each with the configured number of volumes attached | ||||
| 5. Verify access to the volumes | ||||
| 6. Delete the pods and wait for the volumes to detach | ||||
| 7. Delete the PVCs | ||||
| */ | ||||
| const ( | ||||
| 	SCSIUnitsAvailablePerNode = 55 | ||||
| 	CreateOp                  = "CreateOp" | ||||
| 	AttachOp                  = "AttachOp" | ||||
| 	DetachOp                  = "DetachOp" | ||||
| 	DeleteOp                  = "DeleteOp" | ||||
| ) | ||||
|  | ||||
| var _ = utils.SIGDescribe("vcp-performance", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("vcp-performance") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
|  | ||||
| 	var ( | ||||
| 		client           clientset.Interface | ||||
| 		namespace        string | ||||
| 		nodeSelectorList []*NodeSelector | ||||
| 		policyName       string | ||||
| 		datastoreName    string | ||||
| 		volumeCount      int | ||||
| 		volumesPerPod    int | ||||
| 		iterations       int | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
|  | ||||
| 		// Read the environment variables | ||||
| 		volumeCount = GetAndExpectIntEnvVar(VCPPerfVolumeCount) | ||||
| 		volumesPerPod = GetAndExpectIntEnvVar(VCPPerfVolumesPerPod) | ||||
| 		iterations = GetAndExpectIntEnvVar(VCPPerfIterations) | ||||
|  | ||||
| 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName) | ||||
| 		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) | ||||
|  | ||||
| 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		gomega.Expect(nodes.Items).ToNot(gomega.BeEmpty(), "Requires at least one ready node") | ||||
|  | ||||
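| 		// Sanity-check the requested scale: each node can attach at most SCSIUnitsAvailablePerNode (55) | ||||
| 		// disks, so N ready nodes can hold at most N*55 volumes in total and a single pod at most 55. | ||||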
| 		msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items)) | ||||
| 		gomega.Expect(volumeCount).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg) | ||||
|  | ||||
| 		msg = fmt.Sprintf("Cannot attach %d volumes per pod. Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode) | ||||
| 		gomega.Expect(volumesPerPod).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode), msg) | ||||
|  | ||||
| 		nodeSelectorList = createNodeLabels(client, namespace, nodes) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("vcp performance tests", func(ctx context.Context) { | ||||
| 		scList := getTestStorageClasses(ctx, client, policyName, datastoreName) | ||||
| 		for _, sc := range scList { | ||||
| 			ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{}) | ||||
| 		} | ||||
|  | ||||
| 		sumLatency := make(map[string]float64) | ||||
| 		for i := 0; i < iterations; i++ { | ||||
| 			latency := invokeVolumeLifeCyclePerformance(ctx, f, client, namespace, scList, volumesPerPod, volumeCount, nodeSelectorList) | ||||
| 			for key, val := range latency { | ||||
| 				sumLatency[key] += val | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
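| 		// sumLatency holds the per-operation totals in seconds across all iterations; dividing by the | ||||
| 		// iteration count yields the mean latency per iteration for each operation. | ||||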
| 		iterations64 := float64(iterations) | ||||
| 		framework.Logf("Average latency for below operations") | ||||
| 		framework.Logf("Creating %d PVCs and waiting for bound phase: %v seconds", volumeCount, sumLatency[CreateOp]/iterations64) | ||||
| 		framework.Logf("Creating %v Pod: %v seconds", volumeCount/volumesPerPod, sumLatency[AttachOp]/iterations64) | ||||
| 		framework.Logf("Deleting %v Pod and waiting for disk to be detached: %v seconds", volumeCount/volumesPerPod, sumLatency[DetachOp]/iterations64) | ||||
| 		framework.Logf("Deleting %v PVCs: %v seconds", volumeCount, sumLatency[DeleteOp]/iterations64) | ||||
|  | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func getTestStorageClasses(ctx context.Context, client clientset.Interface, policyName, datastoreName string) []*storagev1.StorageClass { | ||||
| 	const ( | ||||
| 		storageclass1 = "sc-default" | ||||
| 		storageclass2 = "sc-vsan" | ||||
| 		storageclass3 = "sc-spbm" | ||||
| 		storageclass4 = "sc-user-specified-ds" | ||||
| 	) | ||||
| 	scNames := []string{storageclass1, storageclass2, storageclass3, storageclass4} | ||||
| 	scArrays := make([]*storagev1.StorageClass, len(scNames)) | ||||
| 	for index, scname := range scNames { | ||||
| 		// Create vSphere Storage Class | ||||
| 		ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname)) | ||||
| 		var sc *storagev1.StorageClass | ||||
| 		var err error | ||||
| 		switch scname { | ||||
| 		case storageclass1: | ||||
| 			sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{}) | ||||
| 		case storageclass2: | ||||
| 			scVSanParameters := make(map[string]string) | ||||
| 			scVSanParameters[PolicyHostFailuresToTolerate] = "1" | ||||
| 			sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 		case storageclass3: | ||||
| 			scSPBMPolicyParameters := make(map[string]string) | ||||
| 			scSPBMPolicyParameters[SpbmStoragePolicy] = policyName | ||||
| 			sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 		case storageclass4: | ||||
| 			scWithDSParameters := make(map[string]string) | ||||
| 			scWithDSParameters[Datastore] = datastoreName | ||||
| 			scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "") | ||||
| 			sc, err = client.StorageV1().StorageClasses().Create(ctx, scWithDatastoreSpec, metav1.CreateOptions{}) | ||||
| 		} | ||||
| 		gomega.Expect(sc).NotTo(gomega.BeNil()) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		scArrays[index] = sc | ||||
| 	} | ||||
| 	return scArrays | ||||
| } | ||||
|  | ||||
| // invokeVolumeLifeCyclePerformance performs full volume lifecycle management and records the latency for each operation | ||||
| func invokeVolumeLifeCyclePerformance(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumesPerPod int, volumeCount int, nodeSelectorList []*NodeSelector) (latency map[string]float64) { | ||||
| 	var ( | ||||
| 		totalpvclaims [][]*v1.PersistentVolumeClaim | ||||
| 		totalpvs      [][]*v1.PersistentVolume | ||||
| 		totalpods     []*v1.Pod | ||||
| 	) | ||||
| 	nodeVolumeMap := make(map[string][]string) | ||||
| 	latency = make(map[string]float64) | ||||
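| 	// Integer division: volumeCount is expected to be a multiple of volumesPerPod; any remainder | ||||
| 	// volumes are not created (e.g. volumeCount=30 with volumesPerPod=4 yields 7 pods and 28 PVCs). | ||||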
| 	numPods := volumeCount / volumesPerPod | ||||
|  | ||||
| 	ginkgo.By(fmt.Sprintf("Creating %d PVCs", volumeCount)) | ||||
| 	start := time.Now() | ||||
| 	for i := 0; i < numPods; i++ { | ||||
| 		var pvclaims []*v1.PersistentVolumeClaim | ||||
| 		for j := 0; j < volumesPerPod; j++ { | ||||
| 			currsc := sc[((i*numPods)+j)%len(sc)] | ||||
| 			pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", currsc)) | ||||
| 			framework.ExpectNoError(err) | ||||
| 			pvclaims = append(pvclaims, pvclaim) | ||||
| 		} | ||||
| 		totalpvclaims = append(totalpvclaims, pvclaims) | ||||
| 	} | ||||
| 	for _, pvclaims := range totalpvclaims { | ||||
| 		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		totalpvs = append(totalpvs, persistentvolumes) | ||||
| 	} | ||||
| 	elapsed := time.Since(start) | ||||
| 	latency[CreateOp] = elapsed.Seconds() | ||||
|  | ||||
| 	ginkgo.By("Creating pod to attach PVs to the node") | ||||
| 	start = time.Now() | ||||
| 	for i, pvclaims := range totalpvclaims { | ||||
| 		nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] | ||||
| 		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 		framework.ExpectNoError(err) | ||||
| 		totalpods = append(totalpods, pod) | ||||
|  | ||||
| 		ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) | ||||
| 	} | ||||
| 	elapsed = time.Since(start) | ||||
| 	latency[AttachOp] = elapsed.Seconds() | ||||
|  | ||||
| 	for i, pod := range totalpods { | ||||
| 		verifyVSphereVolumesAccessible(ctx, client, pod, totalpvs[i]) | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("Deleting pods") | ||||
| 	start = time.Now() | ||||
| 	for _, pod := range totalpods { | ||||
| 		err := e2epod.DeletePodWithWait(ctx, client, pod) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	} | ||||
| 	elapsed = time.Since(start) | ||||
| 	latency[DetachOp] = elapsed.Seconds() | ||||
|  | ||||
| 	for i, pod := range totalpods { | ||||
| 		for _, pv := range totalpvs[i] { | ||||
| 			nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	err := waitForVSphereDisksToDetach(ctx, nodeVolumeMap) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.By("Deleting the PVCs") | ||||
| 	start = time.Now() | ||||
| 	for _, pvclaims := range totalpvclaims { | ||||
| 		for _, pvc := range pvclaims { | ||||
| 			err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvc.Name, namespace) | ||||
| 			framework.ExpectNoError(err) | ||||
| 		} | ||||
| 	} | ||||
| 	elapsed = time.Since(start) | ||||
| 	latency[DeleteOp] = elapsed.Seconds() | ||||
|  | ||||
| 	return latency | ||||
| } | ||||
| @@ -1,391 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/uuid" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| var _ = utils.SIGDescribe("Volume Placement", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-placement") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	const ( | ||||
| 		NodeLabelKey = "vsphere_e2e_label_volume_placement" | ||||
| 	) | ||||
| 	var ( | ||||
| 		c                  clientset.Interface | ||||
| 		ns                 string | ||||
| 		volumePaths        []string | ||||
| 		node1Name          string | ||||
| 		node1KeyValueLabel map[string]string | ||||
| 		node2Name          string | ||||
| 		node2KeyValueLabel map[string]string | ||||
| 		nodeInfo           *NodeInfo | ||||
| 		vsp                *VSphere | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		c = f.ClientSet | ||||
| 		ns = f.Namespace.Name | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable)) | ||||
| 		node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(ctx, c, ns) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			if len(node1KeyValueLabel) > 0 { | ||||
| 				e2enode.RemoveLabelOffNode(c, node1Name, NodeLabelKey) | ||||
| 			} | ||||
| 			if len(node2KeyValueLabel) > 0 { | ||||
| 				e2enode.RemoveLabelOffNode(c, node2Name, NodeLabelKey) | ||||
| 			} | ||||
| 		}) | ||||
| 		nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name) | ||||
| 		vsp = nodeInfo.VSphere | ||||
| 		ginkgo.By("creating vmdk") | ||||
| 		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		volumePaths = append(volumePaths, volumePath) | ||||
| 		ginkgo.DeferCleanup(func() { | ||||
| 			for _, volumePath := range volumePaths { | ||||
| 				vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef) | ||||
| 			} | ||||
| 			volumePaths = nil | ||||
| 		}) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Steps | ||||
|  | ||||
| 		1. Create a pod spec with the volume path of the vmdk and NodeSelector set to the label assigned to node1. | ||||
| 		2. Create the pod and wait for it to become ready. | ||||
| 		3. Verify the volume is attached to node1. | ||||
| 		4. Create an empty file on the volume to verify the volume is writable. | ||||
| 		5. Verify the newly created file and previously created files exist on the volume. | ||||
| 		6. Delete the pod. | ||||
| 		7. Wait for the volume to be detached from node1. | ||||
| 		8. Repeat steps 1 to 7 to make sure back-to-back pod creation on the same worker node with the same volume works as expected. | ||||
|  | ||||
| 	*/ | ||||
|  | ||||
| 	ginkgo.It("should create and delete pod with the same volume source on the same worker node", func(ctx context.Context) { | ||||
| 		var volumeFiles []string | ||||
| 		pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
|  | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns) | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFileName) | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) | ||||
| 		pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
|  | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFileName) | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Steps | ||||
|  | ||||
| 		1. Create a pod spec with the volume path of vmdk1 and NodeSelector set to node1's label. | ||||
| 		2. Create the pod and wait for it to become ready. | ||||
| 		3. Verify the volume is attached to node1. | ||||
| 		4. Create an empty file on the volume to verify the volume is writable. | ||||
| 		5. Verify the newly created file and previously created files exist on the volume. | ||||
| 		6. Delete the pod. | ||||
| 		7. Wait for the volume to be detached from node1. | ||||
| 		8. Create a pod spec with the volume path of vmdk1 and NodeSelector set to node2's label. | ||||
| 		9. Create the pod and wait for it to become ready. | ||||
| 		10. Verify the volume is attached to node2. | ||||
| 		11. Create an empty file on the volume to verify the volume is writable. | ||||
| 		12. Verify the newly created file and previously created files exist on the volume. | ||||
| 		13. Delete the pod. | ||||
| 	*/ | ||||
|  | ||||
| 	ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func(ctx context.Context) { | ||||
| 		var volumeFiles []string | ||||
| 		pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns) | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFileName) | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) | ||||
| 		pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node2Name, node2KeyValueLabel, volumePaths) | ||||
|  | ||||
| 		newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFileName) | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node2Name, volumePaths) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Test multiple volumes from the same datastore within the same pod | ||||
| 		1. Create volumes - vmdk2 | ||||
| 		2. Create a pod spec with the volume paths of vmdk1 (vmdk1 is created in the test setup) and vmdk2. | ||||
| 		3. Create a pod using the spec created in step 2 and wait for the pod to become ready. | ||||
| 		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible. | ||||
| 		5. Delete the pod. | ||||
| 		6. Wait for vmdk1 and vmdk2 to be detached from the node. | ||||
| 		7. Create a pod using the spec created in step 2 and wait for the pod to become ready. | ||||
| 		8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4. | ||||
| 		9. Delete the pod. | ||||
| 		10. Wait for vmdk1 and vmdk2 to be detached from the node. | ||||
| 	*/ | ||||
|  | ||||
| 	ginkgo.It("should create and delete pod with multiple volumes from same datastore", func(ctx context.Context) { | ||||
| 		ginkgo.By("creating another vmdk") | ||||
| 		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		volumePaths = append(volumePaths, volumePath) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) | ||||
| 		pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		volumeFiles := []string{ | ||||
| 			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns), | ||||
| 			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns), | ||||
| 		} | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) | ||||
| 		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) | ||||
| 		pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		newEmptyFilesNames := []string{ | ||||
| 			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns), | ||||
| 			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns), | ||||
| 		} | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFilesNames[0]) | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFilesNames[1]) | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFilesNames, volumeFiles) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Test multiple volumes from different datastores within the same pod | ||||
| 		1. Create volumes - vmdk2 on a non-default shared datastore. | ||||
| 		2. Create a pod spec with the volume paths of vmdk1 (vmdk1 is created in the test setup on the default datastore) and vmdk2. | ||||
| 		3. Create a pod using the spec created in step 2 and wait for the pod to become ready. | ||||
| 		4. Verify both volumes are attached to the node on which the pod is created. Write some data to make sure the volumes are accessible. | ||||
| 		5. Delete the pod. | ||||
| 		6. Wait for vmdk1 and vmdk2 to be detached from the node. | ||||
| 		7. Create a pod using the spec created in step 2 and wait for the pod to become ready. | ||||
| 		8. Verify both volumes are attached to the node on which the pod is created. Verify the volume contents match the content written in step 4. | ||||
| 		9. Delete the pod. | ||||
| 		10. Wait for vmdk1 and vmdk2 to be detached from the node. | ||||
| 	*/ | ||||
| 	ginkgo.It("should create and delete pod with multiple volumes from different datastore", func(ctx context.Context) { | ||||
| 		ginkgo.By("creating another vmdk on non default shared datastore") | ||||
| 		volumeOptions := new(VolumeOptions) | ||||
| 		volumeOptions.CapacityKB = 2097152 | ||||
| 		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10) | ||||
| 		volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore) | ||||
| 		volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef) | ||||
|  | ||||
| 		framework.ExpectNoError(err) | ||||
| 		volumePaths = append(volumePaths, volumePath) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v  and volume: %v", node1Name, volumePaths[0], volumePaths[1])) | ||||
| 		pod := createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
|  | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		volumeFiles := []string{ | ||||
| 			fmt.Sprintf("/mnt/volume1/%v_1.txt", ns), | ||||
| 			fmt.Sprintf("/mnt/volume2/%v_1.txt", ns), | ||||
| 		} | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) | ||||
|  | ||||
| 		ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v  and volume: %v", node1Name, volumePaths[0], volumePaths[1])) | ||||
| 		pod = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, volumePaths) | ||||
| 		// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 		// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 		newEmptyFileNames := []string{ | ||||
| 			fmt.Sprintf("/mnt/volume1/%v_2.txt", ns), | ||||
| 			fmt.Sprintf("/mnt/volume2/%v_2.txt", ns), | ||||
| 		} | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFileNames[0]) | ||||
| 		volumeFiles = append(volumeFiles, newEmptyFileNames[1]) | ||||
| 		createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles) | ||||
| 		deletePodAndWaitForVolumeToDetach(ctx, f, c, pod, node1Name, volumePaths) | ||||
| 	}) | ||||
|  | ||||
| 	/* | ||||
| 		Test back-to-back pod creation/deletion with different volume sources on the same worker node | ||||
| 		    1. Create volumes - vmdk2 | ||||
| 		    2. Create pod spec pod-SpecA with the volume path of vmdk1 and NodeSelector set to the label assigned to node1. | ||||
| 		    3. Create pod spec pod-SpecB with the volume path of vmdk2 and NodeSelector set to the label assigned to node1. | ||||
| 		    4. Create pod-A using pod-SpecA and wait for it to become ready. | ||||
| 		    5. Create pod-B using pod-SpecB and wait for it to become ready. | ||||
| 		    6. Verify the volumes are attached to the node. | ||||
| 		    7. Create an empty file on each volume to make sure the volume is accessible. (Perform this step on pod-A and pod-B.) | ||||
| 		    8. Verify the file created in step 7 is present on the volume. (Perform this step on pod-A and pod-B.) | ||||
| 		    9. Delete pod-A and pod-B. | ||||
| 		    10. Repeat steps 4 to 9 five times and verify that the contents of the associated volumes match. | ||||
| 		    11. Wait for vmdk1 and vmdk2 to be detached from the node. | ||||
| 	*/ | ||||
| 	ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func(ctx context.Context) { | ||||
| 		var ( | ||||
| 			podA                *v1.Pod | ||||
| 			podB                *v1.Pod | ||||
| 			testvolumePathsPodA []string | ||||
| 			testvolumePathsPodB []string | ||||
| 			podAFiles           []string | ||||
| 			podBFiles           []string | ||||
| 		) | ||||
|  | ||||
| 		defer func() { | ||||
| 			ginkgo.By("clean up undeleted pods") | ||||
| 			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podA), "defer: Failed to delete pod ", podA.Name) | ||||
| 			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podB), "defer: Failed to delete pod ", podB.Name) | ||||
| 			ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) | ||||
| 			for _, volumePath := range volumePaths { | ||||
| 				framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, node1Name)) | ||||
| 			} | ||||
| 		}() | ||||
|  | ||||
| 		testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0]) | ||||
| 		// Create another VMDK Volume | ||||
| 		ginkgo.By("creating another vmdk") | ||||
| 		volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		volumePaths = append(volumePaths, volumePath) | ||||
| 		testvolumePathsPodB = append(testvolumePathsPodB, volumePath) | ||||
|  | ||||
| 		for index := 0; index < 5; index++ { | ||||
| 			ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) | ||||
| 			podA = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA) | ||||
|  | ||||
| 			ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) | ||||
| 			podB = createPodWithVolumeAndNodeSelector(ctx, c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB) | ||||
|  | ||||
| 			podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1) | ||||
| 			podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1) | ||||
| 			podAFiles = append(podAFiles, podAFileName) | ||||
| 			podBFiles = append(podBFiles, podBFileName) | ||||
|  | ||||
| 			// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 			ginkgo.By("Creating empty file on volume mounted on pod-A") | ||||
| 			e2eoutput.CreateEmptyFileOnPod(ns, podA.Name, podAFileName) | ||||
|  | ||||
| 			ginkgo.By("Creating empty file volume mounted on pod-B") | ||||
| 			e2eoutput.CreateEmptyFileOnPod(ns, podB.Name, podBFileName) | ||||
|  | ||||
| 			// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 			ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-A") | ||||
| 			verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...) | ||||
| 			ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-B") | ||||
| 			verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...) | ||||
|  | ||||
| 			ginkgo.By("Deleting pod-A") | ||||
| 			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podA), "Failed to delete pod ", podA.Name) | ||||
| 			ginkgo.By("Deleting pod-B") | ||||
| 			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, podB), "Failed to delete pod ", podB.Name) | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func testSetupVolumePlacement(ctx context.Context, client clientset.Interface, namespace string) (node1Name string, node1KeyValueLabel map[string]string, node2Name string, node2KeyValueLabel map[string]string) { | ||||
| 	nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, client, 2) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	if len(nodes.Items) < 2 { | ||||
| 		e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) | ||||
| 	} | ||||
| 	node1Name = nodes.Items[0].Name | ||||
| 	node2Name = nodes.Items[1].Name | ||||
| 	node1LabelValue := "vsphere_e2e_" + string(uuid.NewUUID()) | ||||
| 	node1KeyValueLabel = make(map[string]string) | ||||
| 	node1KeyValueLabel[NodeLabelKey] = node1LabelValue | ||||
| 	e2enode.AddOrUpdateLabelOnNode(client, node1Name, NodeLabelKey, node1LabelValue) | ||||
|  | ||||
| 	node2LabelValue := "vsphere_e2e_" + string(uuid.NewUUID()) | ||||
| 	node2KeyValueLabel = make(map[string]string) | ||||
| 	node2KeyValueLabel[NodeLabelKey] = node2LabelValue | ||||
| 	e2enode.AddOrUpdateLabelOnNode(client, node2Name, NodeLabelKey, node2LabelValue) | ||||
| 	return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel | ||||
| } | ||||
|  | ||||
| func createPodWithVolumeAndNodeSelector(ctx context.Context, client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod { | ||||
| 	var pod *v1.Pod | ||||
| 	var err error | ||||
| 	ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) | ||||
| 	podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) | ||||
|  | ||||
| 	pod, err = client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.By("Waiting for pod to be ready") | ||||
| 	gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) | ||||
|  | ||||
| 	ginkgo.By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) | ||||
| 	for _, volumePath := range volumePaths { | ||||
| 		isAttached, err := diskIsAttached(ctx, volumePath, nodeName) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		if !isAttached { | ||||
| 			framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName) | ||||
| 		} | ||||
| 	} | ||||
| 	return pod | ||||
| } | ||||
|  | ||||
| func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) { | ||||
| 	// Create empty files on the mounted volumes on the pod to verify volume is writable | ||||
| 	ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname)) | ||||
| 	createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate) | ||||
|  | ||||
| 	// Verify newly and previously created files present on the volume mounted on the pod | ||||
| 	ginkgo.By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname)) | ||||
| 	verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...) | ||||
| } | ||||
|  | ||||
| func deletePodAndWaitForVolumeToDetach(ctx context.Context, f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { | ||||
| 	ginkgo.By("Deleting pod") | ||||
| 	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod), "Failed to delete pod ", pod.Name) | ||||
|  | ||||
| 	ginkgo.By("Waiting for volume to be detached from the node") | ||||
| 	for _, volumePath := range volumePaths { | ||||
| 		framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, nodeName)) | ||||
| 	} | ||||
| } | ||||
| @@ -1,185 +0,0 @@ | ||||
| /* | ||||
| Copyright 2018 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	"k8s.io/apimachinery/pkg/util/uuid" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| Test to verify that a volume remains attached through vpxd restart. | ||||
|  | ||||
| For each schedulable node: | ||||
| 1. Create a Volume with default options. | ||||
| 2. Create a Pod with the created Volume. | ||||
| 3. Verify that the Volume is attached. | ||||
| 4. Create a file with random contents under the Volume's mount point on the Pod. | ||||
| 5. Stop the vpxd service on the vCenter host. | ||||
| 6. Verify that the file is accessible on the Pod and that its contents match. | ||||
| 7. Start the vpxd service on the vCenter host. | ||||
| 8. Verify that the Volume remains attached, the file is accessible on the Pod, and that its contents match. | ||||
| 9. Delete the Pod and wait for the Volume to be detached. | ||||
| 10. Delete the Volume. | ||||
| */ | ||||
| var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart", feature.Vsphere, framework.WithSerial(), framework.WithDisruptive(), func() { | ||||
| 	f := framework.NewDefaultFramework("restart-vpxd") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
|  | ||||
| 	type node struct { | ||||
| 		name     string | ||||
| 		kvLabels map[string]string | ||||
| 		nodeInfo *NodeInfo | ||||
| 	} | ||||
|  | ||||
| 	const ( | ||||
| 		labelKey        = "vsphere_e2e_label_vpxd_restart" | ||||
| 		vpxdServiceName = "vmware-vpxd" | ||||
| 	) | ||||
|  | ||||
| 	var ( | ||||
| 		client     clientset.Interface | ||||
| 		namespace  string | ||||
| 		vcNodesMap map[string][]node | ||||
| 	) | ||||
|  | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		// Requires SSH access to vCenter. | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
|  | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, client, f.Timeouts.NodeSchedulable)) | ||||
|  | ||||
| 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		numNodes := len(nodes.Items) | ||||
|  | ||||
| 		vcNodesMap = make(map[string][]node) | ||||
| 		for i := 0; i < numNodes; i++ { | ||||
| 			nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodes.Items[i].Name) | ||||
| 			nodeName := nodes.Items[i].Name | ||||
| 			nodeLabel := "vsphere_e2e_" + string(uuid.NewUUID()) | ||||
| 			e2enode.AddOrUpdateLabelOnNode(client, nodeName, labelKey, nodeLabel) | ||||
|  | ||||
| 			vcHost := nodeInfo.VSphere.Config.Hostname | ||||
| 			vcNodesMap[vcHost] = append(vcNodesMap[vcHost], node{ | ||||
| 				name:     nodeName, | ||||
| 				kvLabels: map[string]string{labelKey: nodeLabel}, | ||||
| 				nodeInfo: nodeInfo, | ||||
| 			}) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify volume remains attached through vpxd restart", func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessSSHKeyPresent() | ||||
|  | ||||
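| 		// Nodes are grouped by their vCenter host so that each host's vpxd service is stopped and | ||||
| 		// restarted exactly once while the volumes attached to that host's nodes are exercised. | ||||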
| 		for vcHost, nodes := range vcNodesMap { | ||||
| 			var ( | ||||
| 				volumePaths  []string | ||||
| 				filePaths    []string | ||||
| 				fileContents []string | ||||
| 				pods         []*v1.Pod | ||||
| 			) | ||||
|  | ||||
| 			framework.Logf("Testing for nodes on vCenter host: %s", vcHost) | ||||
|  | ||||
| 			for i, node := range nodes { | ||||
| 				ginkgo.By(fmt.Sprintf("Creating test vsphere volume %d", i)) | ||||
| 				volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef) | ||||
| 				framework.ExpectNoError(err) | ||||
| 				volumePaths = append(volumePaths, volumePath) | ||||
|  | ||||
| 				ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) | ||||
| 				podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) | ||||
| 				pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{}) | ||||
| 				framework.ExpectNoError(err) | ||||
|  | ||||
| 				ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) | ||||
| 				gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed()) | ||||
|  | ||||
| 				pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) | ||||
| 				framework.ExpectNoError(err) | ||||
| 				pods = append(pods, pod) | ||||
|  | ||||
| 				nodeName := pod.Spec.NodeName | ||||
| 				ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) | ||||
| 				expectVolumeToBeAttached(ctx, nodeName, volumePath) | ||||
|  | ||||
| 				ginkgo.By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) | ||||
| 				filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10)) | ||||
| 				randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10)) | ||||
| 				err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent) | ||||
| 				framework.ExpectNoError(err) | ||||
| 				filePaths = append(filePaths, filePath) | ||||
| 				fileContents = append(fileContents, randomContent) | ||||
| 			} | ||||
|  | ||||
| 			ginkgo.By("Stopping vpxd on the vCenter host") | ||||
| 			vcAddress := vcHost + ":22" | ||||
| 			err := invokeVCenterServiceControl(ctx, "stop", vpxdServiceName, vcAddress) | ||||
| 			framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host") | ||||
|  | ||||
| 			expectFilesToBeAccessible(namespace, pods, filePaths) | ||||
| 			expectFileContentsToMatch(namespace, pods, filePaths, fileContents) | ||||
|  | ||||
| 			ginkgo.By("Starting vpxd on the vCenter host") | ||||
| 			err = invokeVCenterServiceControl(ctx, "start", vpxdServiceName, vcAddress) | ||||
| 			framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host") | ||||
|  | ||||
| 			expectVolumesToBeAttached(ctx, pods, volumePaths) | ||||
| 			expectFilesToBeAccessible(namespace, pods, filePaths) | ||||
| 			expectFileContentsToMatch(namespace, pods, filePaths, fileContents) | ||||
|  | ||||
| 			for i, node := range nodes { | ||||
| 				pod := pods[i] | ||||
| 				nodeName := pod.Spec.NodeName | ||||
| 				volumePath := volumePaths[i] | ||||
|  | ||||
| 				ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) | ||||
| 				err = e2epod.DeletePodWithWait(ctx, client, pod) | ||||
| 				framework.ExpectNoError(err) | ||||
|  | ||||
| 				ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) | ||||
| 				err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName) | ||||
| 				framework.ExpectNoError(err) | ||||
|  | ||||
| 				ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) | ||||
| 				err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef) | ||||
| 				framework.ExpectNoError(err) | ||||
| 			} | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
| @@ -1,367 +0,0 @@ | ||||
| /* | ||||
| Copyright 2017 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"hash/fnv" | ||||
| 	"regexp" | ||||
| 	"time" | ||||
|  | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	vmfsDatastore               = "sharedVmfs-0" | ||||
| 	vsanDatastore               = "vsanDatastore" | ||||
| 	dummyVMPrefixName           = "vsphere-k8s" | ||||
| 	diskStripesCapabilityMaxVal = "11" | ||||
| ) | ||||
|  | ||||
| /* | ||||
|    Test to verify the storage policy based management for dynamic volume provisioning inside Kubernetes. | ||||
|    There are 2 ways to achieve it: | ||||
|    1. Specify VSAN storage capabilities in the storage-class. | ||||
|    2. Use existing vCenter SPBM storage policies. | ||||
|  | ||||
|    Valid VSAN storage capabilities are mentioned below: | ||||
|    1. hostFailuresToTolerate | ||||
|    2. forceProvisioning | ||||
|    3. cacheReservation | ||||
|    4. diskStripes | ||||
|    5. objectSpaceReservation | ||||
|    6. iopsLimit | ||||
|  | ||||
|    Steps | ||||
|    1. Create a StorageClass with either: | ||||
|    		a. VSAN storage capabilities set to valid/invalid values, or | ||||
|    		b. an existing vCenter SPBM storage policy. | ||||
|    2. Create a PVC which uses the StorageClass created in step 1. | ||||
|    3. Wait for the PV to be provisioned. | ||||
|    4. Wait for the PVC's status to become Bound. | ||||
|    5. Create a pod using the PVC on a specific node. | ||||
|    6. Wait for the disk to be attached to the node. | ||||
|    7. Delete the pod and wait for the volume disk to be detached from the node. | ||||
|    8. Delete the PVC, PV and StorageClass. | ||||
|  | ||||
|  | ||||
| */ | ||||
|  | ||||
| var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("volume-vsan-policy") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		client       clientset.Interface | ||||
| 		namespace    string | ||||
| 		scParameters map[string]string | ||||
| 		policyName   string | ||||
| 		tagPolicy    string | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		client = f.ClientSet | ||||
| 		namespace = f.Namespace.Name | ||||
| 		policyName = GetAndExpectStringEnvVar(SPBMPolicyName) | ||||
| 		tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy) | ||||
| 		framework.Logf("framework: %+v", f) | ||||
| 		scParameters = make(map[string]string) | ||||
| 		_, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	}) | ||||
|  | ||||
| 	// Valid policy. | ||||
| 	ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) | ||||
| 		scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal | ||||
| 		scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	// Valid policy. | ||||
| 	ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) | ||||
| 		scParameters[PolicyDiskStripes] = "1" | ||||
| 		scParameters[PolicyObjectSpaceReservation] = "30" | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	// Valid policy. | ||||
| 	ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) | ||||
| 		scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal | ||||
| 		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal | ||||
| 		scParameters[Datastore] = vsanDatastore | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	// Valid policy. | ||||
| 	ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) | ||||
| 		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal | ||||
| 		scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	// Invalid VSAN storage capabilities parameters. | ||||
| 	ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) | ||||
| 		scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal | ||||
| 		scParameters[PolicyDiskStripes] = StripeWidthCapabilityVal | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	// Invalid policy on a VSAN test bed. | ||||
| 	// diskStripes value has to be between 1 and 12. | ||||
| 	ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) | ||||
| 		scParameters[PolicyDiskStripes] = DiskStripesCapabilityInvalidVal | ||||
| 		scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "Invalid value for " + PolicyDiskStripes + "." | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	// Invalid policy on a VSAN test bed. | ||||
| 	// hostFailuresToTolerate value has to be between 0 and 3 inclusive. | ||||
| 	ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) | ||||
| 		scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "." | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	// Specify a valid VSAN policy on a non-VSAN test bed. | ||||
| 	// The test should fail. | ||||
| 	ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, vmfsDatastore)) | ||||
| 		scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal | ||||
| 		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal | ||||
| 		scParameters[Datastore] = vmfsDatastore | ||||
| 		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "The specified datastore: \\\"" + vmfsDatastore + "\\\" is not a VSAN datastore. " + | ||||
| 			"The policy parameters will work only with VSAN Datastore." | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) | ||||
| 		scParameters[SpbmStoragePolicy] = policyName | ||||
| 		scParameters[DiskFormat] = ThinDisk | ||||
| 		framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters) | ||||
| 		invokeValidPolicyTest(ctx, f, client, namespace, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func(ctx context.Context) { | ||||
| 		scParameters[PolicyDiskStripes] = diskStripesCapabilityMaxVal | ||||
| 		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal | ||||
| 		scParameters[Datastore] = vsanDatastore | ||||
| 		framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters) | ||||
| 		kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName) | ||||
| 		controlPlaneNode, err := getControlPlaneNode(ctx, client) | ||||
| 		framework.ExpectNoError(err) | ||||
| 		invokeStaleDummyVMTestWithStoragePolicy(ctx, client, controlPlaneNode, namespace, kubernetesClusterName, scParameters) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, vsanDatastore)) | ||||
| 		scParameters[SpbmStoragePolicy] = tagPolicy | ||||
| 		scParameters[Datastore] = vsanDatastore | ||||
| 		scParameters[DiskFormat] = ThinDisk | ||||
| 		framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = BronzeStoragePolicy | ||||
| 		scParameters[DiskFormat] = ThinDisk | ||||
| 		framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("verify if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName)) | ||||
| 		scParameters[SpbmStoragePolicy] = policyName | ||||
| 		gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty()) | ||||
| 		scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal | ||||
| 		scParameters[DiskFormat] = ThinDisk | ||||
| 		framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters) | ||||
| 		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func invokeValidPolicyTest(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { | ||||
| 	ginkgo.By("Creating Storage Class With storage policy params") | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.By("Creating pod to attach PV to the node") | ||||
| 	// Create pod to attach Volume to Node | ||||
| 	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.By("Verify the volume is accessible and available in the pod") | ||||
| 	verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) | ||||
|  | ||||
| 	ginkgo.By("Deleting pod") | ||||
| 	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod)) | ||||
|  | ||||
| 	ginkgo.By("Waiting for volumes to be detached from the node") | ||||
| 	framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) | ||||
| } | ||||
|  | ||||
| func invokeInvalidPolicyTestNeg(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string) error { | ||||
| 	ginkgo.By("Creating Storage Class With storage policy params") | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) | ||||
| 	framework.ExpectError(err) | ||||
|  | ||||
| 	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) | ||||
| } | ||||
|  | ||||
| // invokeStaleDummyVMTestWithStoragePolicy assumes the control plane node is present in the datacenter specified in the workspace section of the vsphere.conf file. | ||||
| // With the in-tree VCP, when a volume is created using a storage policy, a shadow (dummy) VM is created and then deleted to apply the SPBM policy to the volume. | ||||
| func invokeStaleDummyVMTestWithStoragePolicy(ctx context.Context, client clientset.Interface, controlPlaneNode string, namespace string, clusterName string, scParameters map[string]string) { | ||||
| 	ginkgo.By("Creating Storage Class With storage policy params") | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
| 	ginkgo.By("Expect claim to fail provisioning volume") | ||||
| 	_, err = e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, 2*time.Minute) | ||||
| 	framework.ExpectError(err) | ||||
|  | ||||
| 	updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
| 	// Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM | ||||
| 	time.Sleep(6 * time.Minute) | ||||
|  | ||||
| 	fnvHash := fnv.New32a() | ||||
| 	fnvHash.Write([]byte(vmName)) | ||||
| 	dummyVMFullName := dummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) | ||||
| 	errorMsg := "Dummy VM - " + vmName + " is still present. Failing the test.." | ||||
| 	nodeInfo := TestContext.NodeMapper.GetNodeInfo(controlPlaneNode) | ||||
| 	isVMPresentFlag, err := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	if isVMPresentFlag { | ||||
| 		framework.Failf("VM with name %s is present, %s", dummyVMFullName, errorMsg) | ||||
| 	} | ||||
| } | ||||
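|  | ||||
| // exampleDummyVMName is an illustrative sketch added for this write-up; it was not part of the | ||||
| // original suite. It spells out the shadow-VM naming convention that | ||||
| // invokeStaleDummyVMTestWithStoragePolicy checks above: the name is derived from the cluster | ||||
| // name and the claim UID, hashed with FNV-32a, and prefixed with "vsphere-k8s". The cluster | ||||
| // name and UID below are hypothetical placeholders. | ||||
| func exampleDummyVMName() string { | ||||
| 	clusterName := "kubernetes"                        // hypothetical KubernetesClusterName value | ||||
| 	claimUID := "0f0c0a0e-0000-0000-0000-000000000000" // hypothetical PVC UID | ||||
| 	vmName := clusterName + "-dynamic-pvc-" + claimUID | ||||
| 	fnvHash := fnv.New32a() | ||||
| 	fnvHash.Write([]byte(vmName)) | ||||
| 	// The full name looks like "vsphere-k8s-<decimal fnv32a hash>". | ||||
| 	return dummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) | ||||
| } | ||||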
|  | ||||
| func getControlPlaneNode(ctx context.Context, client clientset.Interface) (string, error) { | ||||
| 	regKubeScheduler := regexp.MustCompile("kube-scheduler-.*") | ||||
| 	regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*") | ||||
|  | ||||
| 	podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{}) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	if len(podList.Items) < 1 { | ||||
| 		return "", fmt.Errorf("could not find any pods in namespace %s", metav1.NamespaceSystem) | ||||
| 	} | ||||
| 	for _, pod := range podList.Items { | ||||
| 		if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) { | ||||
| 			return pod.Spec.NodeName, nil | ||||
| 		} | ||||
| 	} | ||||
| 	return "", fmt.Errorf("could not find any nodes where control plane pods are running") | ||||
| } | ||||
| @@ -1,537 +0,0 @@ | ||||
| /* | ||||
| Copyright 2019 The Kubernetes Authors. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package vsphere | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/onsi/ginkgo/v2" | ||||
| 	"github.com/onsi/gomega" | ||||
|  | ||||
| 	v1 "k8s.io/api/core/v1" | ||||
| 	storagev1 "k8s.io/api/storage/v1" | ||||
| 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||||
| 	clientset "k8s.io/client-go/kubernetes" | ||||
| 	volumeevents "k8s.io/kubernetes/pkg/controller/volume/events" | ||||
| 	"k8s.io/kubernetes/test/e2e/feature" | ||||
| 	"k8s.io/kubernetes/test/e2e/framework" | ||||
| 	e2enode "k8s.io/kubernetes/test/e2e/framework/node" | ||||
| 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod" | ||||
| 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv" | ||||
| 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" | ||||
| 	"k8s.io/kubernetes/test/e2e/storage/utils" | ||||
| 	admissionapi "k8s.io/pod-security-admission/api" | ||||
| ) | ||||
|  | ||||
| /* | ||||
|    Test to verify multi-zone support for dynamic volume provisioning in kubernetes. | ||||
|    The test environment is illustrated below: | ||||
|  | ||||
|    datacenter-1 | ||||
|         --->cluster-vsan-1 (zone-a)                              ____________________    _________________ | ||||
|                 --->host-1         : master                     |                    |  |                 | | ||||
|                 --->host-2         : node1  ___________________ |                    |  |                 | | ||||
|                 --->host-3 (zone-c): node2 |                   ||    vsanDatastore   |  |                 | | ||||
|                                            |  localDatastore   ||                    |  |                 | | ||||
|                                            |___________________||____________________|  |   sharedVmfs-0  | | ||||
|         --->cluster-vsan-2 (zone-b)                              ____________________   |                 | | ||||
|                 --->host-4         : node3                      |                    |  |                 | | ||||
|                 --->host-5         : node4                      |  vsanDatastore (1) |  |                 | | ||||
|                 --->host-6                                      |                    |  |                 | | ||||
|                                                                 |____________________|  |_________________| | ||||
|         --->cluster-3 (zone-c)              ___________________ | ||||
|                 --->host-7         : node5 |                   | | ||||
|                                            | localDatastore (1)| | ||||
|                                            |___________________| | ||||
|    datacenter-2 | ||||
|         --->cluster-1 (zone-d)             ___________________ | ||||
|                 --->host-8        : node6 |                   | | ||||
|                                           |  localDatastore   | | ||||
|                                           |___________________| | ||||
|  | ||||
| 	Testbed description : | ||||
| 	1. cluster-vsan-1 is tagged with zone-a. So, vsanDatastore inherits zone-a since all the hosts under zone-a have vsanDatastore mounted on them. | ||||
| 	2. cluster-vsan-2 is tagged with zone-b. So, vsanDatastore (1) inherits zone-b since all the hosts under zone-b have vsanDatastore (1) mounted on them. | ||||
| 	3. sharedVmfs-0 inherits both zone-a and zone-b since all the hosts in both zone-a and zone-b have this datastore mounted on them. | ||||
| 	4. cluster-3 is tagged with zone-c. cluster-3 only contains host-7. | ||||
| 	5. host-3 under cluster-vsan-1 is tagged with zone-c. | ||||
| 	6. Since there are no shared datastores between host-7 under cluster-3 and host-3 under cluster-vsan-1, no datastores in the environment inherit zone-c. | ||||
| 	7. host-8 under datacenter-2 and cluster-1 is tagged with zone-d. So, localDatastore attached to host-8 inherits zone-d. | ||||
| 	8. The six worker nodes are distributed among the hosts as shown in the above illustration. | ||||
| 	9. Two storage policies are created on VC. One is a VSAN storage policy named compatpolicy with the hostFailuresToTolerate capability set to 1. The other is a non-compatible storage policy (read from VCPZoneNonCompatPolicyName in BeforeEach) that no shared datastore in the environment satisfies; the negative tests rely on it. | ||||
|  | ||||
| 	Testsuite description : | ||||
| 	1. Tests to verify that zone labels are set correctly on a dynamically created PV. | ||||
| 	2. Tests to verify dynamic pv creation fails if availability zones are not specified or if there are no shared datastores under the specified zones. | ||||
| 	3. Tests to verify dynamic pv creation using availability zones works in combination with other storage class parameters such as storage policy, | ||||
| 	   datastore and VSAN capabilities. | ||||
| 	4. Tests to verify dynamic pv creation using availability zones fails in combination with other storage class parameters such as storage policy, | ||||
| 	   datastore and VSAN capabilities specifications when any of the former mentioned parameters are incompatible with the rest. | ||||
| 	5. Tests to verify dynamic pv creation using availability zones work across different datacenters in the same VC. | ||||
| */ | ||||
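|  | ||||
| // exampleZonedStorageClassInputs is an illustrative sketch added for this write-up; it was not | ||||
| // part of the original suite. It shows the shape of the inputs the zone tests below pass (via | ||||
| // the verify* helpers) to getVSphereStorageClassSpec: scParameters carries storage policy, | ||||
| // datastore and VSAN capability keys, zones becomes the allowed zone list for the class, and | ||||
| // the binding mode is either immediate ("") or WaitForFirstConsumer. The zone, policy and | ||||
| // datastore names are hypothetical placeholders for the values read from the environment in | ||||
| // BeforeEach. | ||||
| func exampleZonedStorageClassInputs() (map[string]string, []string, storagev1.VolumeBindingMode) { | ||||
| 	scParameters := map[string]string{ | ||||
| 		SpbmStoragePolicy: "compatpolicy",  // hypothetical SPBM policy name | ||||
| 		Datastore:         "vsanDatastore", // hypothetical datastore name | ||||
| 	} | ||||
| 	zones := []string{"zone-a", "zone-b"} // hypothetical zone names | ||||
| 	return scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer | ||||
| } | ||||
|  | ||||
| // A typical use, mirroring verifyPVCAndPodCreationSucceeds below: | ||||
| //   getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode) | ||||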
|  | ||||
| var _ = utils.SIGDescribe("Zone Support", feature.Vsphere, func() { | ||||
| 	f := framework.NewDefaultFramework("zone-support") | ||||
| 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged | ||||
| 	var ( | ||||
| 		scParameters    map[string]string | ||||
| 		zones           []string | ||||
| 		vsanDatastore1  string | ||||
| 		vsanDatastore2  string | ||||
| 		localDatastore  string | ||||
| 		compatPolicy    string | ||||
| 		nonCompatPolicy string | ||||
| 		zoneA           string | ||||
| 		zoneB           string | ||||
| 		zoneC           string | ||||
| 		zoneD           string | ||||
| 		invalidZone     string | ||||
| 	) | ||||
| 	ginkgo.BeforeEach(func(ctx context.Context) { | ||||
| 		e2eskipper.SkipUnlessProviderIs("vsphere") | ||||
| 		Bootstrap(f) | ||||
| 		e2eskipper.SkipUnlessMultizone(ctx, f.ClientSet) | ||||
| 		vsanDatastore1 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore1) | ||||
| 		vsanDatastore2 = GetAndExpectStringEnvVar(VCPZoneVsanDatastore2) | ||||
| 		localDatastore = GetAndExpectStringEnvVar(VCPZoneLocalDatastore) | ||||
| 		compatPolicy = GetAndExpectStringEnvVar(VCPZoneCompatPolicyName) | ||||
| 		nonCompatPolicy = GetAndExpectStringEnvVar(VCPZoneNonCompatPolicyName) | ||||
| 		zoneA = GetAndExpectStringEnvVar(VCPZoneA) | ||||
| 		zoneB = GetAndExpectStringEnvVar(VCPZoneB) | ||||
| 		zoneC = GetAndExpectStringEnvVar(VCPZoneC) | ||||
| 		zoneD = GetAndExpectStringEnvVar(VCPZoneD) | ||||
| 		invalidZone = GetAndExpectStringEnvVar(VCPInvalidZone) | ||||
| 		scParameters = make(map[string]string) | ||||
| 		zones = make([]string, 0) | ||||
| 		_, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet) | ||||
| 		framework.ExpectNoError(err) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVZoneLabels(ctx, f, nil, zones) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		zones = append(zones, zoneB) | ||||
| 		verifyPVZoneLabels(ctx, f, nil, zones) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", invalidZone)) | ||||
| 		zones = append(zones, invalidZone) | ||||
| 		err := verifyPVCCreationFails(ctx, f, nil, zones, "") | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "Failed to find a shared datastore matching zone [" + invalidZone + "]" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, nil, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		zones = append(zones, zoneB) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, nil, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		zones = append(zones, zoneC) | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "") | ||||
| 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		zones = append(zones, zoneB) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = nonCompatPolicy | ||||
| 		zones = append(zones, zoneA) | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "") | ||||
| 		errorMsg := "No compatible datastores found that satisfy the storage policy requirements" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = nonCompatPolicy | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		zones = append(zones, zoneA) | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "") | ||||
| 		errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"." | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		scParameters[Datastore] = vsanDatastore2 | ||||
| 		zones = append(zones, zoneC) | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "") | ||||
| 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { | ||||
| 		ginkgo.By("Creating storage class with no zones") | ||||
| 		err := verifyPVCCreationFails(ctx, f, nil, nil, "") | ||||
| 		errorMsg := "No shared datastores found in the Kubernetes cluster" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, nil, "") | ||||
| 		errorMsg := "No shared datastores found in the Kubernetes cluster" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, nil, "") | ||||
| 		errorMsg := "No shared datastores found in the Kubernetes cluster" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, nil, "") | ||||
| 		errorMsg := "No shared datastores found in the Kubernetes cluster" | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class has no shared datastores under it.", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) | ||||
| 		zones = append(zones, zoneC) | ||||
| 		err := verifyPVCCreationFails(ctx, f, nil, zones, "") | ||||
| 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation fails when multiple zones are specified in the storage class. (No shared datastores exist among both zones)", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		zones = append(zones, zoneC) | ||||
| 		err := verifyPVCCreationFails(ctx, f, nil, zones, "") | ||||
| 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", PolicyHostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) | ||||
| 		scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal | ||||
| 		zones = append(zones, zoneA) | ||||
| 		err := verifyPVCCreationFails(ctx, f, scParameters, zones, "") | ||||
| 		errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "." | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", PolicyObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, PolicyIopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) | ||||
| 		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal | ||||
| 		scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal | ||||
| 		scParameters[Datastore] = vsanDatastore1 | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD)) | ||||
| 		zones = append(zones, zoneD) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore)) | ||||
| 		scParameters[Datastore] = localDatastore | ||||
| 		zones = append(zones, zoneD) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, "") | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		zones = append(zones, zoneA) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		zones = append(zones, zoneB) | ||||
| 		verifyPVCAndPodCreationSucceeds(ctx, f, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer) | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and following zones :%s and %s", zoneA, zoneC)) | ||||
| 		zones = append(zones, zoneA) | ||||
| 		zones = append(zones, zoneC) | ||||
| 		err := verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx, f, nil, zones) | ||||
| 		framework.ExpectError(err) | ||||
| 		errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC | ||||
| 		if !strings.Contains(err.Error(), errorMsg) { | ||||
| 			framework.ExpectNoError(err, errorMsg) | ||||
| 		} | ||||
| 	}) | ||||
|  | ||||
| 	ginkgo.It("Verify a pod fails to get scheduled when conflicting volume topology (allowedTopologies) and pod scheduling constraints(nodeSelector) are specified", func(ctx context.Context) { | ||||
| 		ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumerMode, storage policy :%s and zone :%s", compatPolicy, zoneA)) | ||||
| 		scParameters[SpbmStoragePolicy] = compatPolicy | ||||
| 		// allowedTopologies set as zoneA | ||||
| 		zones = append(zones, zoneA) | ||||
| 		nodeSelectorMap := map[string]string{ | ||||
| 			// nodeSelector set as zoneB | ||||
| 			v1.LabelTopologyZone: zoneB, | ||||
| 		} | ||||
| 		verifyPodSchedulingFails(ctx, f, nodeSelectorMap, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) | ||||
| 	}) | ||||
| }) | ||||
|  | ||||
| func verifyPVCAndPodCreationSucceeds(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { | ||||
| 	client := f.ClientSet | ||||
| 	namespace := f.Namespace.Name | ||||
| 	timeouts := f.Timeouts | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
|  | ||||
| 	var persistentvolumes []*v1.PersistentVolume | ||||
| 	// If WaitForFirstConsumer mode, verify the PVC binding status after pod creation. For immediate mode, do it now. | ||||
| 	if volumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer { | ||||
| 		persistentvolumes = waitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("Creating pod to attach PV to the node") | ||||
| 	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { | ||||
| 		persistentvolumes = waitForPVClaimBoundPhase(ctx, client, pvclaims, timeouts.ClaimProvision) | ||||
| 	} | ||||
|  | ||||
| 	if zones != nil { | ||||
| 		ginkgo.By("Verify persistent volume was created on the right zone") | ||||
| 		verifyVolumeCreationOnRightZone(ctx, persistentvolumes, pod.Spec.NodeName, zones) | ||||
| 	} | ||||
|  | ||||
| 	ginkgo.By("Verify the volume is accessible and available in the pod") | ||||
| 	verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes) | ||||
|  | ||||
| 	ginkgo.By("Deleting pod") | ||||
| 	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod)) | ||||
|  | ||||
| 	ginkgo.By("Waiting for volumes to be detached from the node") | ||||
| 	framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) | ||||
| } | ||||
|  | ||||
| func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string) error { | ||||
| 	client := f.ClientSet | ||||
| 	namespace := f.Namespace.Name | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
|  | ||||
| 	ginkgo.By("Creating a pod") | ||||
| 	pod := e2epod.MakePod(namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 	pod, err = client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) | ||||
|  | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) | ||||
| 	framework.ExpectError(err) | ||||
|  | ||||
| 	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	// Look for PVC ProvisioningFailed event and return the message. | ||||
| 	for _, event := range eventList.Items { | ||||
| 		if event.Source.Component == "persistentvolume-controller" && event.Reason == volumeevents.ProvisioningFailed { | ||||
| 			return fmt.Errorf("Failure message: %s", event.Message) | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func waitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) []*v1.PersistentVolume { | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, timeout) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	return persistentvolumes | ||||
| } | ||||
|  | ||||
| func verifyPodSchedulingFails(ctx context.Context, f *framework.Framework, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { | ||||
| 	client := f.ClientSet | ||||
| 	namespace := f.Namespace.Name | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
|  | ||||
| 	ginkgo.By("Creating a pod") | ||||
| 	pod, err := e2epod.CreateUnschedulablePod(ctx, client, namespace, nodeSelector, pvclaims, f.NamespacePodSecurityLevel, "") | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, client, pod) | ||||
| } | ||||
|  | ||||
| func verifyPVCCreationFails(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) error { | ||||
| 	client := f.ClientSet | ||||
| 	namespace := f.Namespace.Name | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the Storage Class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) | ||||
| 	framework.ExpectError(err) | ||||
|  | ||||
| 	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{}) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	framework.Logf("Failure message : %+q", eventList.Items[0].Message) | ||||
| 	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) | ||||
| } | ||||
|  | ||||
| func verifyPVZoneLabels(ctx context.Context, f *framework.Framework, scParameters map[string]string, zones []string) { | ||||
| 	client := f.ClientSet | ||||
| 	namespace := f.Namespace.Name | ||||
| 	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{}) | ||||
| 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) | ||||
| 	ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, storageclass.Name, metav1.DeleteOptions{}) | ||||
|  | ||||
| 	ginkgo.By("Creating PVC using the storage class") | ||||
| 	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) | ||||
| 	framework.ExpectNoError(err) | ||||
| 	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace) | ||||
|  | ||||
| 	var pvclaims []*v1.PersistentVolumeClaim | ||||
| 	pvclaims = append(pvclaims, pvclaim) | ||||
| 	ginkgo.By("Waiting for claim to be in bound phase") | ||||
| 	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision) | ||||
| 	framework.ExpectNoError(err) | ||||
|  | ||||
| 	ginkgo.By("Verify zone information is present in the volume labels") | ||||
| 	for _, pv := range persistentvolumes { | ||||
| 		// Multiple zones are separated with "__" | ||||
| 		pvZoneLabels := strings.Split(pv.ObjectMeta.Labels[v1.LabelTopologyZone], "__") | ||||
| 		for _, zone := range zones { | ||||
| 			gomega.Expect(pvZoneLabels).Should(gomega.ContainElement(zone), "Incorrect or missing zone labels in pv.") | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
							
								
								
									
2
vendor/github.com/vmware/govmomi/.dockerignore
generated
vendored
							| @@ -1,2 +0,0 @@ | ||||
| Dockerfile* | ||||
| .*ignore | ||||
							
								
								
									
13
vendor/github.com/vmware/govmomi/.gitignore
generated
vendored
							| @@ -1,13 +0,0 @@ | ||||
| secrets.yml | ||||
| dist/ | ||||
| .idea/ | ||||
|  | ||||
| # ignore tools binaries | ||||
| /git-chglog | ||||
|  | ||||
| # ignore RELEASE-specific CHANGELOG | ||||
| /RELEASE_CHANGELOG.md | ||||
|  | ||||
| # Ignore editor temp files | ||||
| *~ | ||||
| .vscode/ | ||||
							
								
								
									
18
vendor/github.com/vmware/govmomi/.golangci.yml
generated
vendored
							| @@ -1,18 +0,0 @@ | ||||
| linters: | ||||
|   disable-all: true | ||||
|   enable: | ||||
|   - goimports | ||||
|   - govet | ||||
|   # Run with --fast=false for more extensive checks | ||||
|   fast: true | ||||
| # override defaults | ||||
| linters-settings: | ||||
|   goimports: | ||||
|     # put imports beginning with prefix after 3rd-party packages; | ||||
|     # it's a comma-separated list of prefixes | ||||
|     local-prefixes: github.com/vmware/govmomi | ||||
| run: | ||||
|   timeout: 6m | ||||
|   skip-dirs: | ||||
|   - vim25/xml | ||||
|   - cns/types | ||||
							
								
								
									
151
vendor/github.com/vmware/govmomi/.goreleaser.yml
generated
vendored
							| @@ -1,151 +0,0 @@ | ||||
| --- | ||||
| project_name: govmomi | ||||
|  | ||||
| builds: | ||||
|   - id: govc | ||||
|     goos: &goos-defs | ||||
|       - linux | ||||
|       - darwin | ||||
|       - windows | ||||
|       - freebsd | ||||
|     goarch: &goarch-defs | ||||
|       - amd64 | ||||
|       - arm | ||||
|       - arm64 | ||||
|       - mips64le | ||||
|     env: | ||||
|       - CGO_ENABLED=0 | ||||
|       - PKGPATH=github.com/vmware/govmomi/govc/flags | ||||
|     main: ./govc/main.go | ||||
|     binary: govc | ||||
|     ldflags: | ||||
|       - "-X {{.Env.PKGPATH}}.BuildVersion={{.Version}} -X {{.Env.PKGPATH}}.BuildCommit={{.ShortCommit}} -X {{.Env.PKGPATH}}.BuildDate={{.Date}}" | ||||
|   - id: vcsim | ||||
|     goos: *goos-defs | ||||
|     goarch: *goarch-defs | ||||
|     env: | ||||
|       - CGO_ENABLED=0 | ||||
|     main: ./vcsim/main.go | ||||
|     binary: vcsim | ||||
|     ldflags: | ||||
|       - "-X main.buildVersion={{.Version}} -X main.buildCommit={{.ShortCommit}} -X main.buildDate={{.Date}}" | ||||
|  | ||||
| archives: | ||||
|   - id: govcbuild | ||||
|     builds: | ||||
|       - govc | ||||
|     name_template: >- | ||||
|       govc_ | ||||
|       {{- title .Os }}_ | ||||
|       {{- if eq .Arch "amd64" }}x86_64 | ||||
|       {{- else if eq .Arch "386" }}i386 | ||||
|       {{- else }}{{ .Arch }}{{ end }} | ||||
|     format_overrides: &overrides | ||||
|       - goos: windows | ||||
|         format: zip | ||||
|     files: &extrafiles | ||||
|       - CHANGELOG.md | ||||
|       - LICENSE.txt | ||||
|       - README.md | ||||
|  | ||||
|   - id: vcsimbuild | ||||
|     builds: | ||||
|       - vcsim | ||||
|     name_template: >- | ||||
|       vcsim_ | ||||
|       {{- title .Os }}_ | ||||
|       {{- if eq .Arch "amd64" }}x86_64 | ||||
|       {{- else if eq .Arch "386" }}i386 | ||||
|       {{- else }}{{ .Arch }}{{ end }} | ||||
|     format_overrides: *overrides | ||||
|     files: *extrafiles | ||||
|  | ||||
| snapshot: | ||||
|   name_template: "{{ .Tag }}-next" | ||||
|  | ||||
| checksum: | ||||
|   name_template: "checksums.txt" | ||||
|  | ||||
| changelog: | ||||
|   sort: asc | ||||
|   filters: | ||||
|     exclude: | ||||
|       - "^docs:" | ||||
|       - "^test:" | ||||
|       - Merge pull request | ||||
|       - Merge branch | ||||
|  | ||||
| # upload disabled since it is maintained in homebrew-core | ||||
| brews: | ||||
|   - name: govc | ||||
|     ids: | ||||
|       - govcbuild | ||||
|     tap: | ||||
|       owner: govmomi | ||||
|       name: homebrew-tap | ||||
|       # TODO: create token in specified tap repo, add as secret to govmomi repo and reference in release workflow | ||||
|       # token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}" | ||||
|     # enable once we do fully automated releases | ||||
|     skip_upload: true | ||||
|     commit_author: | ||||
|       name: Alfred the Narwhal | ||||
|       email: cna-alfred@vmware.com | ||||
|     folder: Formula | ||||
|     homepage: "https://github.com/vmware/govmomi/blob/master/govc/README.md" | ||||
|     description: "govc is a vSphere CLI built on top of govmomi." | ||||
|     test: | | ||||
|       system "#{bin}/govc version" | ||||
|     install: | | ||||
|       bin.install "govc" | ||||
|   - name: vcsim | ||||
|     ids: | ||||
|       - vcsimbuild | ||||
|     tap: | ||||
|       owner: govmomi | ||||
|       name: homebrew-tap | ||||
|       # TODO: create token in specified tap repo, add as secret to govmomi repo and reference in release workflow | ||||
|       # token: "{{ .Env.HOMEBREW_TAP_GITHUB_TOKEN }}" | ||||
|     # enable once we do fully automated releases | ||||
|     skip_upload: true | ||||
|     commit_author: | ||||
|       name: Alfred the Narwhal | ||||
|       email: cna-alfred@vmware.com | ||||
|     folder: Formula | ||||
|     homepage: "https://github.com/vmware/govmomi/blob/master/vcsim/README.md" | ||||
|     description: "vcsim is a vSphere API simulator built on top of govmomi." | ||||
|     test: | | ||||
|       system "#{bin}/vcsim -h" | ||||
|     install: | | ||||
|       bin.install "vcsim" | ||||
|  | ||||
| dockers: | ||||
|   - image_templates: | ||||
|       - "vmware/govc:{{ .Tag }}" | ||||
|       - "vmware/govc:{{ .ShortCommit }}" | ||||
|       - "vmware/govc:latest" | ||||
|     dockerfile: Dockerfile.govc | ||||
|     ids: | ||||
|       - govc | ||||
|     build_flag_templates: | ||||
|       - "--pull" | ||||
|       - "--label=org.opencontainers.image.created={{.Date}}" | ||||
|       - "--label=org.opencontainers.image.title={{.ProjectName}}" | ||||
|       - "--label=org.opencontainers.image.revision={{.FullCommit}}" | ||||
|       - "--label=org.opencontainers.image.version={{.Version}}" | ||||
|       - "--label=org.opencontainers.image.url=https://github.com/vmware/govmomi" | ||||
|       - "--platform=linux/amd64" | ||||
|   - image_templates: | ||||
|       - "vmware/vcsim:{{ .Tag }}" | ||||
|       - "vmware/vcsim:{{ .ShortCommit }}" | ||||
|       - "vmware/vcsim:latest" | ||||
|     dockerfile: Dockerfile.vcsim | ||||
|     ids: | ||||
|       - vcsim | ||||
|     build_flag_templates: | ||||
|       - "--pull" | ||||
|       - "--label=org.opencontainers.image.created={{.Date}}" | ||||
|       - "--label=org.opencontainers.image.title={{.ProjectName}}" | ||||
|       - "--label=org.opencontainers.image.revision={{.FullCommit}}" | ||||
|       - "--label=org.opencontainers.image.version={{.Version}}" | ||||
|       - "--label=org.opencontainers.image.url=https://github.com/vmware/govmomi" | ||||
|       - "--platform=linux/amd64" | ||||
							
								
								
									
45  vendor/github.com/vmware/govmomi/.mailmap  (generated, vendored)
| @@ -1,45 +0,0 @@ | ||||
| amanpaha <amanpahariya@microsoft.com> amanpaha <84718160+amanpaha@users.noreply.github.com> | ||||
| Amanda H. L. de Andrade <amanda.andrade@serpro.gov.br> Amanda Hager Lopes de Andrade Katz <amanda.katz@serpro.gov.br> | ||||
| Amanda H. L. de Andrade <amanda.andrade@serpro.gov.br> amandahla <amanda.andrade@serpro.gov.br> | ||||
| Amit Bathla <abathla@.vmware.com> <abathla@promb-1s-dhcp216.eng.vmware.com> | ||||
| Andrew Kutz <akutz@vmware.com> <sakutz@gmail.com> | ||||
| Andrew Kutz <akutz@vmware.com> akutz <akutz@vmware.com> | ||||
| Andrew Kutz <akutz@vmware.com> Andrew Kutz <101085+akutz@users.noreply.github.com> | ||||
| Anfernee Yongkun Gui <agui@vmware.com> <anfernee.gui@gmail.com> | ||||
| Anfernee Yongkun Gui <agui@vmware.com> Yongkun Anfernee Gui <agui@vmware.com> | ||||
| Anna Carrigan <anna.carrigan@hpe.com> Anna <anna.carrigan@outlook.com> | ||||
| Balu Dontu <bdontu@vmware.com> BaluDontu <bdontu@vmware.com> | ||||
| Bruce Downs <bruceadowns@gmail.com> <bdowns@vmware.com> | ||||
| Bruce Downs <bruceadowns@gmail.com> <bruce.downs@autodesk.com> | ||||
| Bruce Downs <bruceadowns@gmail.com> <bruce.downs@jivesoftware.com> | ||||
| Clint Greenwood <cgreenwood@vmware.com> <clint.greenwood@gmail.com> | ||||
| Cédric Blomart <cblomart@gmail.com> <cedric.blomart@minfin.fed.be> | ||||
| Cédric Blomart <cblomart@gmail.com> cedric <cblomart@gmail.com> | ||||
| David Stark <dave@davidstark.name> <david.stark@bskyb.com> | ||||
| Doug MacEachern <dougm@vmware.com> dougm <dougm@users.noreply.github.com> | ||||
| Eric Gray <egray@vmware.com> <ericgray@users.noreply.github.com> | ||||
| Eric Yutao <eric.yutao@gmail.com> eric <eric.yutao@gmail.com> | ||||
| Fabio Rapposelli <fabio@vmware.com> <fabio@rapposelli.org> | ||||
| Faiyaz Ahmed <faiyaza@vmware.com> Faiyaz Ahmed <ahmedf@vmware.com> | ||||
| Faiyaz Ahmed <faiyaza@vmware.com> Faiyaz Ahmed <faiyaza@gmail.com> | ||||
| Faiyaz Ahmed <faiyaza@vmware.com> Faiyaz Ahmed <fdawg4l@users.noreply.github.com> | ||||
| Henrik Hodne <henrik@travis-ci.com> <henrik@hodne.io> | ||||
| Ian Eyberg <ian@deferpanic.com> <ian@opuler.com> | ||||
| Jeremy Canady <jcanady@jackhenry.com> <jcanady@gmail.com> | ||||
| Jiatong Wang <wjiatong@vmware.com> jiatongw <wjiatong@vmware.com> | ||||
| Lintong Jiang <lintongj@vmware.com> lintongj <55512168+lintongj@users.noreply.github.com> | ||||
| Michael Gasch <mgasch@vmware.com> Michael Gasch <embano1@live.com> | ||||
| Mincho Tonev <mtonev@vmware.com> matonev <31008054+matonev@users.noreply.github.com> | ||||
| Parveen Chahal <parkuma@microsoft.com> <mail.chahal@gmail.com> | ||||
| Pieter Noordhuis <pnoordhuis@vmware.com> <pcnoordhuis@gmail.com> | ||||
| Saad Malik <saad@spectrocloud.com> <simfox3@gmail.com> | ||||
| Takaaki Furukawa <takaaki.frkw@gmail.com> takaaki.furukawa <takaaki.furukawa@mail.rakuten.com> | ||||
| Takaaki Furukawa <takaaki.frkw@gmail.com> tkak <takaaki.frkw@gmail.com> | ||||
| Uwe Bessle <Uwe.Bessle@iteratec.de> Uwe Bessle <u.bessle.extern@eos-ts.com> | ||||
| Uwe Bessle <Uwe.Bessle@iteratec.de> Uwe Bessle <uwe.bessle@web.de> | ||||
| Vadim Egorov <vegorov@vmware.com> <egorovv@gmail.com> | ||||
| William Lam <wlam@vmware.com> <info.virtuallyghetto@gmail.com> | ||||
| Yun Zhou <yunz@vmware.com> <41678287+gh05tn0va@users.noreply.github.com> | ||||
| Zach G <zguan@vmware.com> zach96guan <zach96guan@users.noreply.github.com> | ||||
| Zach Tucker <ztucker@vmware.com> <jzt@users.noreply.github.com> | ||||
| Zee Yang <zeey@vmware.com> <zee.yang@gmail.com> | ||||
							
								
								
									
3515  vendor/github.com/vmware/govmomi/CHANGELOG.md  (generated, vendored)
File diff suppressed because it is too large.
								
								
									
197  vendor/github.com/vmware/govmomi/CONTRIBUTING.md  (generated, vendored)
| @@ -1,197 +0,0 @@ | ||||
| # Contributing to `govmomi` | ||||
|  | ||||
| ## Getting started | ||||
|  | ||||
| First, fork the repository on GitHub to your personal account. | ||||
|  | ||||
| Change `$USER` in the examples below to your Github username if they are not the | ||||
| same. | ||||
|  | ||||
| ```bash | ||||
| git clone https://github.com/vmware/govmomi.git && cd govmomi | ||||
|  | ||||
| # prevent accidentally pushing to vmware/govmomi | ||||
| git config push.default nothing | ||||
| git remote rename origin vmware | ||||
|  | ||||
| # add your fork | ||||
| git remote add $USER git@github.com:$USER/govmomi.git | ||||
|  | ||||
| git fetch -av | ||||
| ``` | ||||
|  | ||||
| ## Contribution Flow | ||||
|  | ||||
| This is a rough outline of what a contributor's workflow looks like: | ||||
|  | ||||
| - Create an issue describing the feature/fix | ||||
| - Create a topic branch from where you want to base your work. | ||||
| - Make commits of logical units. | ||||
| - Make sure your commit messages are in the proper format (see below). | ||||
| - Push your changes to a topic branch in your fork of the repository. | ||||
| - Submit a pull request to `vmware/govmomi`. | ||||
|  | ||||
| See [below](#format-of-the-commit-message) for details on commit best practices | ||||
| and **supported prefixes**, e.g. `govc: <message>`. | ||||
|  | ||||
| > **Note:** If you are new to Git(hub) check out [Git rebase, squash...oh | ||||
| > my!](https://www.mgasch.com/2021/05/git-basics/) for more details on how to | ||||
| > successfully contribute to an open source project. | ||||
|  | ||||
| ### Example 1 - Fix a Bug in `govmomi` | ||||
|  | ||||
| ```bash | ||||
| git checkout -b issue-<number> vmware/master | ||||
| git add <files> | ||||
| git commit -m "fix: ..." -m "Closes: #<issue-number>" | ||||
| git push $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| ### Example 2 - Add a new (non-breaking) API to `govmomi` | ||||
|  | ||||
| ```bash | ||||
| git checkout -b issue-<number> vmware/master | ||||
| git add <files> | ||||
| git commit -m "Add API ..." -m "Closes: #<issue-number>" | ||||
| git push $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| ### Example 3 - Add a Feature to `govc` | ||||
|  | ||||
| ```bash | ||||
| git checkout -b issue-<number> vmware/master | ||||
| git add <files> | ||||
| git commit -m "govc: Add feature ..." -m "Closes: #<issue-number>" | ||||
| git push $USER issue-<number> | ||||
| ``` | ||||
| **Note**:   | ||||
| To register the new `govc` command package, add a blank `_` import to `govmomi/govc/main.go`. | ||||
|  | ||||
| ### Example 4 - Fix a Bug in `vcsim` | ||||
|  | ||||
| ```bash | ||||
| git checkout -b issue-<number> vmware/master | ||||
| git add <files> | ||||
| git commit -m "vcsim: Fix ..." -m "Closes: #<issue-number>" | ||||
| git push $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| ### Example 5 - Document Breaking (API) Changes | ||||
|  | ||||
| Breaking changes, e.g. to the `govmomi` APIs, are highlighted in the `CHANGELOG` | ||||
| and release notes when the keyword `BREAKING:` is used in the commit message | ||||
| body.  | ||||
|  | ||||
| The text after `BREAKING:` is used in the corresponding highlighted section. | ||||
| Thus these details should be stated at the body of the commit message. | ||||
| Multi-line strings are supported. | ||||
|  | ||||
| ```bash | ||||
| git checkout -b issue-<number> vmware/master | ||||
| git add <files> | ||||
| cat << EOF | git commit -F - | ||||
| Add ctx to funcXYZ | ||||
|  | ||||
| This commit introduces context.Context to function XYZ | ||||
| Closes: #1234 | ||||
|  | ||||
| BREAKING: Add ctx to funcXYZ() | ||||
| EOF | ||||
|  | ||||
| git push $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| ### Stay in sync with Upstream | ||||
|  | ||||
| When your branch gets out of sync with the vmware/master branch, use the | ||||
| following to update (rebase): | ||||
|  | ||||
| ```bash | ||||
| git checkout issue-<number> | ||||
| git fetch -a | ||||
| git rebase vmware/master | ||||
| git push --force-with-lease $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| ### Updating Pull Requests | ||||
|  | ||||
| If your PR fails to pass CI or needs changes based on code review, it's ok to | ||||
| add more commits stating the changes made, e.g. "Address review comments". This | ||||
| is to assist the reviewer(s) to easily detect and review the recent changes. | ||||
|  | ||||
| In case of small PRs, it's ok to squash and force-push (see further below) | ||||
| directly instead. | ||||
|  | ||||
| ```bash | ||||
| # incorporate review feedback | ||||
| git add . | ||||
|  | ||||
| # create a fixup commit which will be merged into your (original) <commit> | ||||
| git commit --fixup <commit> | ||||
| git push $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| Be sure to add a comment to the PR indicating your new changes are ready to | ||||
| review, as Github does not generate a notification when you git push. | ||||
|  | ||||
| Once the review is complete, squash and push your final commit(s): | ||||
|  | ||||
| ```bash | ||||
| # squash all commits into one | ||||
| # --autosquash will automatically detect and merge fixup commits | ||||
| git rebase -i --autosquash vmware/master | ||||
| git push --force-with-lease $USER issue-<number> | ||||
| ``` | ||||
|  | ||||
| ### Code Style | ||||
|  | ||||
| The coding style suggested by the Go community is used in `govmomi`. See the | ||||
| [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details. | ||||
|  | ||||
| Try to limit column width to 120 characters for both code and markdown documents | ||||
| such as this one. | ||||
|  | ||||
| ### Format of the Commit Message | ||||
|  | ||||
| We follow the conventions described in [How to Write a Git Commit | ||||
| Message](http://chris.beams.io/posts/git-commit/). | ||||
|  | ||||
| Be sure to include any related GitHub issue references in the commit message, | ||||
| e.g. `Closes: #<number>`. | ||||
|  | ||||
| The [`CHANGELOG.md`](./CHANGELOG.md) and release page uses **commit message | ||||
| prefixes** for grouping and highlighting. A commit message that | ||||
| starts with `[prefix:] ` will place this commit under the respective | ||||
| section in the `CHANGELOG`.  | ||||
|  | ||||
| The following example creates a commit referencing the `issue: 1234` and puts | ||||
| the commit message in the `govc` `CHANGELOG` section: | ||||
|  | ||||
| ```bash | ||||
| git commit -s -m "govc: Add CLI command X" -m "Closes: #1234" | ||||
| ``` | ||||
|  | ||||
| Currently the following prefixes are used: | ||||
|  | ||||
| - `api:` - Use for API-related changes | ||||
| - `govc:` - Use for changes to `govc` CLI | ||||
| - `vcsim:` - Use for changes to vCenter Simulator | ||||
| - `chore:` - Use for repository related activities | ||||
| - `fix:` - Use for bug fixes | ||||
| - `docs:` - Use for changes to the documentation | ||||
| - `examples:` - Use for changes to examples | ||||
|  | ||||
| If your contribution falls into multiple categories, e.g. `api` and `vcsim` it | ||||
| is recommended to break up your commits using distinct prefixes. | ||||
|  | ||||
| ### Running CI Checks and Tests | ||||
| You can run both `make check` and `make test` from the top level of the | ||||
| repository.  | ||||
|  | ||||
| While `make check` will catch formatting and import errors, it will not apply | ||||
| any fixes. The developer is expected to do that. | ||||
|  | ||||
| ## Reporting Bugs and Creating Issues | ||||
|  | ||||
| When opening a new issue, try to roughly follow the commit message format | ||||
| conventions above. | ||||
							
								
								
									
256  vendor/github.com/vmware/govmomi/CONTRIBUTORS  (generated, vendored)
| @@ -1,256 +0,0 @@ | ||||
| # People who can (and typically have) contributed to this repository. | ||||
| # | ||||
| # This script is generated by contributors.sh | ||||
| # | ||||
|  | ||||
| Abhijeet Kasurde <akasurde@redhat.com> | ||||
| abrarshivani <abrarshivani@users.noreply.github.com> | ||||
| Adam Chalkley <atc0005@users.noreply.github.com> | ||||
| Adam Fowler <adam@adamfowler.org> | ||||
| Adam Shannon <adamkshannon@gmail.com> | ||||
| Akanksha Panse <pansea@vmware.com> | ||||
| Al Biheiri <abiheiri@apple.com> | ||||
| Alessandro Cortiana <alessandro.cortiana@gmail.com> | ||||
| Alex <puzo2002@gmail.com> | ||||
| Alex Bozhenko <alexbozhenko@fb.com> | ||||
| Alex Ellis (VMware) <alexellis2@gmail.com> | ||||
| Aligator <8278538+yet-another-aligator@users.noreply.github.com> | ||||
| Alvaro Miranda <kikitux@gmail.com> | ||||
| Amanda H. L. de Andrade <amanda.andrade@serpro.gov.br> | ||||
| amanpaha <amanpahariya@microsoft.com> | ||||
| Amit Bathla <abathla@.vmware.com> | ||||
| amit bezalel <amit.bezalel@hpe.com> | ||||
| Andrew <AndrewDi@users.noreply.github.com> | ||||
| Andrew Chin <andrew@andrewtchin.com> | ||||
| Andrew Kutz <akutz@vmware.com> | ||||
| Andrey Klimentyev <andrey.klimentyev@flant.com> | ||||
| Anfernee Yongkun Gui <agui@vmware.com> | ||||
| angystardust <angystardust@users.noreply.github.com> | ||||
| aniketGslab <aniket.shinde@gslab.com> | ||||
| Ankit Vaidya <vaidyaa@vmware.com> | ||||
| Ankur Huralikoppi <huralikoppia@vmware.com> | ||||
| Anna Carrigan <anna.carrigan@hpe.com> | ||||
| Antony Saba <awsaba@gmail.com> | ||||
| Ariel Chinn <arielchinn@gmail.com> | ||||
| Arran Walker <arran.walker@zopa.com> | ||||
| Artem Anisimov <aanisimov@inbox.ru> | ||||
| Arunesh Pandey <parunesh@vmware.com> | ||||
| Aryeh Weinreb <aryehweinreb@gmail.com> | ||||
| Augy StClair <augy@google.com> | ||||
| Austin Parker <aparker@apprenda.com> | ||||
| Balu Dontu <bdontu@vmware.com> | ||||
| bastienbc <bastien.barbe.creuly@gmail.com> | ||||
| Ben Corrie <bcorrie@vmware.com> | ||||
| Ben Vickers <bvickers@pivotal.io> | ||||
| Benjamin Davini <davinib@vmware.com> | ||||
| Benjamin Peterson <benjamin@python.org> | ||||
| Benjamin Vickers <bvickers@vmware.com> | ||||
| Bhavya Choudhary <bhavyac@vmware.com> | ||||
| Bob Killen <killen.bob@gmail.com> | ||||
| Brad Fitzpatrick <bradfitz@golang.org> | ||||
| Brian Rak <brak@vmware.com> | ||||
| brian57860 <brian57860@users.noreply.github.com> | ||||
| Bruce Downs <bruceadowns@gmail.com> | ||||
| Bryan Venteicher <bryanventeicher@gmail.com> | ||||
| Cédric Blomart <cblomart@gmail.com> | ||||
| Cheng Cheng <chengch@vmware.com> | ||||
| Chethan Venkatesh <chethanv@vmware.com> | ||||
| Choudhury Sarada Prasanna Nanda <cspn@google.com> | ||||
| Chris Marchesi <chrism@vancluevertech.com> | ||||
| Christian Höltje <docwhat@gerf.org> | ||||
| Clint Greenwood <cgreenwood@vmware.com> | ||||
| cpiment <pimentel.carlos@gmail.com> | ||||
| CuiHaozhi <cuihaozhi@chinacloud.com.cn> | ||||
| Dan Ilan <danilan@google.com> | ||||
| Dan Norris <protochron@users.noreply.github.com> | ||||
| Daniel Frederick Crisman <daniel@crisman.org> | ||||
| Daniel Mueller <deso@posteo.net> | ||||
| Danny Lockard <danny.lockard@banno.com> | ||||
| Dave Gress <gressd@vmware.com> | ||||
| Dave Smith-Uchida <dsmithuchida@vmware.com> | ||||
| Dave Tucker <dave@dtucker.co.uk> | ||||
| David Gress <gressd@vmware.com> | ||||
| David Stark <dave@davidstark.name> | ||||
| Davide Agnello <dagnello@hp.com> | ||||
| Davinder Kumar <davinderk@vmware.com> | ||||
| Defa <zhoudefa666@163.com> | ||||
| demarey <christophe.demarey@inria.fr> | ||||
| dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | ||||
| Deric Crago <deric.crago@gmail.com> | ||||
| ditsuke <ditsuke@protonmail.com> | ||||
| Divyen Patel <divyenp@vmware.com> | ||||
| Dnyanesh Gate <dnyanesh.gate@druva.com> | ||||
| Doug MacEachern <dougm@vmware.com> | ||||
| East <60801291+houfangdong@users.noreply.github.com> | ||||
| Eloy Coto <eloy.coto@gmail.com> | ||||
| embano1 <embano1@users.noreply.github.com> | ||||
| Eng Zer Jun <engzerjun@gmail.com> | ||||
| Eric Edens <ericedens@google.com> | ||||
| Eric Graham <16710890+Pheric@users.noreply.github.com> | ||||
| Eric Gray <egray@vmware.com> | ||||
| Eric Yutao <eric.yutao@gmail.com> | ||||
| Erik Hollensbe <github@hollensbe.org> | ||||
| Essodjolo KAHANAM <essodjolo@kahanam.com> | ||||
| Ethan Kaley <ethan.kaley@emc.com> | ||||
| Evan Chu <echu@vmware.com> | ||||
| Fabio Rapposelli <fabio@vmware.com> | ||||
| Faiyaz Ahmed <faiyaza@vmware.com> | ||||
| Federico Pellegatta <12744504+federico-pellegatta@users.noreply.github.com> | ||||
| forkbomber <forkbomber@users.noreply.github.com> | ||||
| François Rigault <rigault.francois@gmail.com> | ||||
| freebsdly <qinhuajun@outlook.com> | ||||
| Gavin Gray <gavin@infinio.com> | ||||
| Gavrie Philipson <gavrie.philipson@elastifile.com> | ||||
| George Hicken <ghicken@vmware.com> | ||||
| Gerrit Renker <Gerrit.Renker@ctl.io> | ||||
| gthombare <gthombare@vmware.com> | ||||
| HakanSunay <hakansunay@abv.bg> | ||||
| Hasan Mahmood <mahmoodh@vmware.com> | ||||
| Haydon Ryan <haydon.ryan@gmail.com> | ||||
| Heiko Reese <hreese@users.noreply.github.com> | ||||
| Henrik Hodne <henrik@travis-ci.com> | ||||
| hkumar <hkumar@vmware.com> | ||||
| Hrabur Stoyanov <hstoyanov@vmware.com> | ||||
| hui luo <luoh@vmware.com> | ||||
| Ian Eyberg <ian@deferpanic.com> | ||||
| Isaac Rodman <isaac@eyz.us> | ||||
| Ivan Mikushin <imikushin@vmware.com> | ||||
| Ivan Porto Carrero <icarrero@vmware.com> | ||||
| James King <james.king@emc.com> | ||||
| James Peach <jpeach@vmware.com> | ||||
| Jason Kincl <jkincl@gmail.com> | ||||
| Jeremy Canady <jcanady@jackhenry.com> | ||||
| jeremy-clerc <jeremy@clerc.io> | ||||
| Jiatong Wang <wjiatong@vmware.com> | ||||
| jingyizPensando <jingyiz@pensando.io> | ||||
| João Pereira <joaodrp@gmail.com> | ||||
| Jonas Ausevicius <jonas.ausevicius@virtustream.com> | ||||
| Jorge Sevilla <jorge.sevilla@rstor.io> | ||||
| Julien PILLON <jpillon@lesalternatives.org> | ||||
| Justin J. Novack <jnovack@users.noreply.github.com> | ||||
| kayrus <kay.diam@gmail.com> | ||||
| Keenan Brock <keenan@thebrocks.net> | ||||
| Kevin George <georgek@vmware.com> | ||||
| Knappek <andy.knapp.ak@gmail.com> | ||||
| Leslie Wang <qiwa@pensando.io> | ||||
| leslie-qiwa <leslie.qiwa@gmail.com> | ||||
| Lintong Jiang <lintongj@vmware.com> | ||||
| Liping Xue <lipingx@vmware.com> | ||||
| Louie Jiang <jiangl@vmware.com> | ||||
| Luther Monson <luther.monson@gmail.com> | ||||
| Madanagopal Arunachalam <marunachalam@vmware.com> | ||||
| makelarisjr <8687447+makelarisjr@users.noreply.github.com> | ||||
| maplain <fangyuanl@vmware.com> | ||||
| Marc Carmier <mcarmier@gmail.com> | ||||
| Marcus Tan <marcus.tan@rubrik.com> | ||||
| Maria Ntalla <maria.ntalla@gmail.com> | ||||
| Marin Atanasov Nikolov <mnikolov@vmware.com> | ||||
| Mario Trangoni <mjtrangoni@gmail.com> | ||||
| Mark Dechiaro <mdechiaro@users.noreply.github.com> | ||||
| Mark Peek <markpeek@vmware.com> | ||||
| Mark Rexwinkel <Mark.Rexwinkel@elekta.com> | ||||
| martin <martin@catai.org> | ||||
| Matt Clay <matt@mystile.com> | ||||
| Matt Moore <mattmoor@vmware.com> | ||||
| Matt Moriarity <matt@mattmoriarity.com> | ||||
| Matthew Cosgrove <matthew.cosgrove@dell.com> | ||||
| mbhadale <mbhadale@vmware.com> | ||||
| Merlijn Sebrechts <merlijn.sebrechts@gmail.com> | ||||
| Mevan Samaratunga <mevansam@gmail.com> | ||||
| Michael Gasch <15986659+embano1@users.noreply.github.com> | ||||
| Michael Gasch <mgasch@vmware.com> | ||||
| Michal Jankowski <mjankowski@vmware.com> | ||||
| Mike Schinkel <mike@newclarity.net> | ||||
| Mincho Tonev <mtonev@vmware.com> | ||||
| mingwei <mingwei@smartx.com> | ||||
| Nicolas Lamirault <nicolas.lamirault@gmail.com> | ||||
| Nikhil Kathare <nikhil.kathare@netapp.com> | ||||
| Nikhil R Deshpande <ndeshpande@vmware.com> | ||||
| Nikolas Grottendieck <git@nikolasgrottendieck.com> | ||||
| Nils Elde <nils.elde@sscinc.com> | ||||
| nirbhay <nirbhay.bagmar@nutanix.com> | ||||
| Nobuhiro MIKI <nmiki@yahoo-corp.jp> | ||||
| Om Kumar <om.kumar@hpe.com> | ||||
| Omar Kohl <omarkohl@gmail.com> | ||||
| Parham Alvani <parham.alvani@gmail.com> | ||||
| Parveen Chahal <parkuma@microsoft.com> | ||||
| Paul Martin <25058109+rawstorage@users.noreply.github.com> | ||||
| Pierre Gronlier <pierre.gronlier@corp.ovh.com> | ||||
| Pieter Noordhuis <pnoordhuis@vmware.com> | ||||
| pradeepj <50135054+pradeep288@users.noreply.github.com> | ||||
| Pranshu Jain <jpranshu@vmware.com> | ||||
| prydin <prydin@vmware.com> | ||||
| rconde01 <rconde01@hotmail.com> | ||||
| rHermes <teodor_spaeren@riseup.net> | ||||
| Rianto Wahyudi <rwahyudi@gmail.com> | ||||
| Ricardo Katz <rkatz@vmware.com> | ||||
| Robin Watkins <robwatkins@gmail.com> | ||||
| Rowan Jacobs <rojacobs@pivotal.io> | ||||
| Roy Ling <royling0024@gmail.com> | ||||
| rsikdar <rsikdar@berkeley.edu> | ||||
| runner.mei <runner.mei@gmail.com> | ||||
| Ryan Johnson <johnsonryan@vmware.com> | ||||
| S R Ashrith <sashrith@vmware.com> | ||||
| S.Çağlar Onur <conur@vmware.com> | ||||
| Saad Malik <saad@spectrocloud.com> | ||||
| Sam Zhu <zhusa@zhusa-a02.vmware.com> | ||||
| samzhu333 <45263849+samzhu333@users.noreply.github.com> | ||||
| Sandeep Pissay Srinivasa Rao <ssrinivas@vmware.com> | ||||
| Scott Holden <scott@nullops.io> | ||||
| Sergey Ignatov <sergey.ignatov@jetbrains.com> | ||||
| serokles <timbo.alexander@gmail.com> | ||||
| shahra <shahra@vmware.com> | ||||
| Shalini Bhaskara <sbhaskara@vmware.com> | ||||
| Shaozhen Ding <dsz0111@gmail.com> | ||||
| Shawn Neal <sneal@sneal.net> | ||||
| shylasrinivas <sshyla@vmware.com> | ||||
| sky-joker <sky.jokerxx@gmail.com> | ||||
| smaftoul <samuel.maftoul@gmail.com> | ||||
| smahadik <smahadik@vmware.com> | ||||
| Sten Feldman <exile@chamber.ee> | ||||
| Stepan Mazurov <smazurov@gmail.com> | ||||
| Steve Purcell <steve@sanityinc.com> | ||||
| Sudhindra Aithal <sudhiaithal@pensando.io> | ||||
| SUMIT AGRAWAL <asumit@vmware.com> | ||||
| Sunny Carter <sunny.carter@metaswitch.com> | ||||
| syuparn <s.hello.spagetti@gmail.com> | ||||
| Takaaki Furukawa <takaaki.frkw@gmail.com> | ||||
| Tamas Eger <tamas.eger@bitrise.io> | ||||
| Tanay Kothari <tkothari@vmware.com> | ||||
| tanishi <tanishi503@gmail.com> | ||||
| Ted Zlatanov <tzz@lifelogs.com> | ||||
| Thad Craft <tcraft@pivotal.io> | ||||
| Thibaut Ackermann <thibaut.ackermann@alcatel-lucent.com> | ||||
| Tim McNamara <tim.mcnamara@canonical.com> | ||||
| Tjeu Kayim <15987676+TjeuKayim@users.noreply.github.com> | ||||
| Toomas Pelberg <toomas.pelberg@playtech.com> | ||||
| Trevor Dawe <trevor.dawe@gmail.com> | ||||
| tshihad <tshihad9@gmail.com> | ||||
| Uwe Bessle <Uwe.Bessle@iteratec.de> | ||||
| Vadim Egorov <vegorov@vmware.com> | ||||
| Vikram Krishnamurthy <vikramkrishnamu@vmware.com> | ||||
| volanja <volaaanja@gmail.com> | ||||
| Volodymyr Bobyr <pupsua@gmail.com> | ||||
| Waldek Maleska <w.maleska@gmail.com> | ||||
| William Lam <wlam@vmware.com> | ||||
| Witold Krecicki <wpk@culm.net> | ||||
| xing-yang <xingyang105@gmail.com> | ||||
| xinyanw409 <wxinyan@vmware.com> | ||||
| Yang Yang <yangy@vmware.com> | ||||
| yangxi <yangxi@vmware.com> | ||||
| Yann Hodique <yhodique@google.com> | ||||
| Yash Nitin Desai <desaiy@vmware.com> | ||||
| Yassine TIJANI <ytijani@vmware.com> | ||||
| Yi Jiang <yijiang@vmware.com> | ||||
| yiyingy <yiyingy@vmware.com> | ||||
| ykakarap <yuva2811@gmail.com> | ||||
| Yogesh Sobale <6104071+ysobale@users.noreply.github.com> | ||||
| Yue Yin <yueyin@yuyin-a01.vmware.com> | ||||
| Yun Zhou <yunz@vmware.com> | ||||
| Yuya Kusakabe <yuya.kusakabe@gmail.com> | ||||
| Zach G <zguan@vmware.com> | ||||
| Zach Tucker <ztucker@vmware.com> | ||||
| Zacharias Taubert <zacharias.taubert@gmail.com> | ||||
| Zee Yang <zeey@vmware.com> | ||||
| zyuxin <zyuxin@vmware.com> | ||||
| Кузаков Евгений <kuzakov@satel.org> | ||||
							
								
								
									
45  vendor/github.com/vmware/govmomi/Dockerfile.govc  (generated, vendored)
| @@ -1,45 +0,0 @@ | ||||
| # Create a builder container | ||||
| # golang:1.18.0-buster amd64 | ||||
| FROM golang@sha256:7d39537344486528f8cdb3bd8adb98ab7f0f4236044b6944fed8631da35a4ce5 AS build | ||||
| WORKDIR /go/src/app | ||||
|  | ||||
| # Create appuser to isolate potential vulnerabilities | ||||
| # See https://stackoverflow.com/a/55757473/12429735 | ||||
| ENV USER=appuser | ||||
| ENV UID=10001 | ||||
| RUN adduser \ | ||||
|     --disabled-password \ | ||||
|     --gecos "" \ | ||||
|     --shell "/sbin/nologin" \ | ||||
|     --no-create-home \ | ||||
|     --uid "${UID}" \ | ||||
|     "${USER}" | ||||
|  | ||||
| # Create a new tmp directory so no bad actors can manipulate it | ||||
| RUN mkdir /temporary-tmp-directory && chmod 777 /temporary-tmp-directory | ||||
|  | ||||
| ############################################################################### | ||||
| # Final stage | ||||
| FROM scratch | ||||
|  | ||||
| # Allow container to use latest TLS certificates | ||||
| COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ | ||||
|  | ||||
| # Copy over appuser to run as non-root | ||||
| COPY --from=build /etc/passwd /etc/passwd | ||||
| COPY --from=build /etc/group /etc/group | ||||
|  | ||||
| # Copy over the /tmp directory for golang/os.TmpDir | ||||
| COPY --chown=appuser --from=build /temporary-tmp-directory /tmp | ||||
|  | ||||
| # Copy application from external build | ||||
| COPY govc /govc | ||||
|  | ||||
| # Run all commands as non-root | ||||
| USER appuser:appuser | ||||
|  | ||||
| # session cache, etc | ||||
| ENV GOVMOMI_HOME=/tmp | ||||
|  | ||||
| # Set CMD to application with container defaults | ||||
| CMD ["/govc"] | ||||
							
								
								
									
47  vendor/github.com/vmware/govmomi/Dockerfile.vcsim  (generated, vendored)
| @@ -1,47 +0,0 @@ | ||||
| # Create a builder container | ||||
| # golang:1.18.0-buster amd64 | ||||
| FROM golang@sha256:7d39537344486528f8cdb3bd8adb98ab7f0f4236044b6944fed8631da35a4ce5 AS build | ||||
| WORKDIR /go/src/app | ||||
|  | ||||
| # Create appuser to isolate potential vulnerabilities | ||||
| # See https://stackoverflow.com/a/55757473/12429735 | ||||
| ENV USER=appuser | ||||
| ENV UID=10001 | ||||
| RUN adduser \ | ||||
|     --disabled-password \ | ||||
|     --gecos "" \ | ||||
|     --home "/nonexistent" \ | ||||
|     --shell "/sbin/nologin" \ | ||||
|     --no-create-home \ | ||||
|     --uid "${UID}" \ | ||||
|     "${USER}" | ||||
|  | ||||
| # Create a new tmp directory so no bad actors can manipulate it | ||||
| RUN mkdir /temporary-tmp-directory && chmod 777 /temporary-tmp-directory | ||||
|  | ||||
| ############################################################################### | ||||
| # Final stage | ||||
| FROM scratch | ||||
|  | ||||
| # Run all commands as non-root | ||||
| USER appuser:appuser | ||||
|  | ||||
| # Allow container to use latest TLS certificates | ||||
| COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ | ||||
|  | ||||
| # Copy over appuser to run as non-root | ||||
| COPY --from=build /etc/passwd /etc/passwd | ||||
| COPY --from=build /etc/group /etc/group | ||||
|  | ||||
| # Copy over the /tmp directory for golang/os.TmpDir | ||||
| COPY --chown=appuser --from=build /temporary-tmp-directory /tmp | ||||
|  | ||||
| # Expose application port | ||||
| EXPOSE 8989 | ||||
|  | ||||
| # Copy application from external build | ||||
| COPY vcsim /vcsim | ||||
|  | ||||
| # Set entrypoint to application with container defaults | ||||
| ENTRYPOINT [ "/vcsim" ] | ||||
| CMD ["-l", "0.0.0.0:8989"] | ||||
							
								
								
									
202  vendor/github.com/vmware/govmomi/LICENSE.txt  (generated, vendored)
| @@ -1,202 +0,0 @@ | ||||
|  | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
158  vendor/github.com/vmware/govmomi/Makefile  (generated, vendored)
| @@ -1,158 +0,0 @@ | ||||
| # Copyright (c) 2021 VMware, Inc. All Rights Reserved. | ||||
| # SPDX-License-Identifier: Apache-2.0 | ||||
|  | ||||
| # If you update this file, please follow | ||||
| # https://www.thapaliya.com/en/writings/well-documented-makefiles/ | ||||
|  | ||||
| # Ensure Make is run with bash shell as some syntax below is bash-specific | ||||
| SHELL := /usr/bin/env bash | ||||
|  | ||||
| # Print the help/usage when make is executed without any other arguments | ||||
| .DEFAULT_GOAL := help | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Help | ||||
| ## -------------------------------------- | ||||
|  | ||||
| .PHONY: help | ||||
| help: ## Display usage | ||||
| 	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make [target] \033[36m\033[0m\n\nTargets:\n"} /^[a-zA-Z_-]+:.*?##/ { printf "  \033[36m%-20s\033[0m %s\n", $$1, $$2 }' $(MAKEFILE_LIST) | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Locations and programs | ||||
| ## -------------------------------------- | ||||
|  | ||||
| # Directories | ||||
| BIN_DIR       := bin | ||||
| TOOLS_DIR     := hack/tools | ||||
| TOOLS_BIN_DIR := $(TOOLS_DIR)/bin | ||||
|  | ||||
| # Tooling binaries | ||||
| GO            ?= $(shell command -v go 2>/dev/null) | ||||
| GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Prerequisites | ||||
| ## -------------------------------------- | ||||
|  | ||||
| # Do not proceed unless the go binary is present. | ||||
| ifeq (,$(strip $(GO))) | ||||
| $(error The "go" program cannot be found) | ||||
| endif | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Linting and fixing linter errors | ||||
| ## -------------------------------------- | ||||
|  | ||||
| .PHONY: lint | ||||
| lint: ## Run all the lint targets | ||||
| 	$(MAKE) lint-go-full | ||||
|  | ||||
| GOLANGCI_LINT_FLAGS ?= --fast=true | ||||
| .PHONY: lint-go | ||||
| lint-go: $(GOLANGCI_LINT) ## Lint codebase | ||||
| 	$(GOLANGCI_LINT) run -v $(GOLANGCI_LINT_FLAGS) | ||||
|  | ||||
| .PHONY: lint-go-full | ||||
| lint-go-full: GOLANGCI_LINT_FLAGS = --fast=false | ||||
| lint-go-full: lint-go ## Run slower linters to detect possible issues | ||||
|  | ||||
| .PHONY: fix | ||||
| fix: GOLANGCI_LINT_FLAGS = --fast=false --fix | ||||
| fix: lint-go ## Tries to fix errors reported by lint-go-full target | ||||
|  | ||||
| .PHONY: check | ||||
| check: lint-go-full | ||||
| check: 	## Run linters | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Tooling Binaries | ||||
| ## -------------------------------------- | ||||
|  | ||||
| TOOLING_BINARIES := $(GOLANGCI_LINT) | ||||
| tools: $(TOOLING_BINARIES) ## Build tooling binaries | ||||
| .PHONY: $(TOOLING_BINARIES) | ||||
| $(TOOLING_BINARIES): | ||||
| 	cd $(TOOLS_DIR); make $(@F) | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Build / Install | ||||
| ## -------------------------------------- | ||||
| .PHONY: install | ||||
| install: ## Install govc and vcsim | ||||
| 	$(MAKE) -C govc install | ||||
| 	$(MAKE) -C vcsim install | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Generate | ||||
| ## -------------------------------------- | ||||
|  | ||||
| .PHONY: mod | ||||
| mod: ## Runs go mod tidy to validate modules | ||||
| 	go mod tidy -v | ||||
|  | ||||
| .PHONY: mod-get | ||||
| mod-get: ## Downloads and caches the modules | ||||
| 	go mod download | ||||
|  | ||||
| .PHONY: doc | ||||
| doc: install | ||||
| doc: ## Generates govc USAGE.md | ||||
| 	./govc/usage.sh > ./govc/USAGE.md | ||||
|  | ||||
|  | ||||
| ## -------------------------------------- | ||||
| ## Tests | ||||
| ## -------------------------------------- | ||||
|  | ||||
| # Test options | ||||
| TEST_COUNT ?= 1 | ||||
| TEST_TIMEOUT ?= 5m | ||||
| TEST_RACE_HISTORY_SIZE ?= 5 | ||||
| GORACE ?= history_size=$(TEST_RACE_HISTORY_SIZE) | ||||
|  | ||||
| ifeq (-count,$(findstring -count,$(TEST_OPTS))) | ||||
| $(error Use TEST_COUNT to override this option) | ||||
| endif | ||||
|  | ||||
| ifeq (-race,$(findstring -race,$(TEST_OPTS))) | ||||
| $(error The -race flag is enabled by default & cannot be specified in TEST_OPTS) | ||||
| endif | ||||
|  | ||||
| ifeq (-timeout,$(findstring -timeout,$(TEST_OPTS))) | ||||
| $(error Use TEST_TIMEOUT to override this option) | ||||
| endif | ||||
|  | ||||
| .PHONY: go-test | ||||
| go-test: ## Runs go unit tests with race detector enabled | ||||
| 	GORACE=$(GORACE) $(GO) test \ | ||||
|   -count $(TEST_COUNT) \ | ||||
|   -race \ | ||||
|   -timeout $(TEST_TIMEOUT) \ | ||||
|   -v $(TEST_OPTS) \ | ||||
|   ./... | ||||
|  | ||||
| .PHONY: govc-test | ||||
| govc-test: install | ||||
| govc-test: ## Runs govc bats tests | ||||
| 	./govc/test/images/update.sh | ||||
| 	(cd govc/test && ./vendor/github.com/sstephenson/bats/libexec/bats -t .) | ||||
|  | ||||
| .PHONY: govc-test-sso | ||||
| govc-test-sso: install | ||||
| 	./govc/test/images/update.sh | ||||
| 	(cd govc/test && SSO_BATS=1 ./vendor/github.com/sstephenson/bats/libexec/bats -t sso.bats) | ||||
|  | ||||
| .PHONY: govc-test-sso-assert-cert | ||||
| govc-test-sso-assert-cert: | ||||
| 	SSO_BATS_ASSERT_CERT=1 $(MAKE) govc-test-sso | ||||
|  | ||||
| .PHONY: test | ||||
| test: go-test govc-test	## Runs go-test and govc-test | ||||
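The `go-test` target in the Makefile above always enables the race detector and rejects attempts to pass `-race` again through `TEST_OPTS`. For readers unfamiliar with what that flag catches, here is a small, self-contained example of the kind of unsynchronized access it reports; it is illustrative only and unrelated to govmomi's own tests:

```go
package example

import (
	"sync"
	"testing"
)

// TestUnsynchronizedCounter contains a deliberate data race: two goroutines
// increment the same variable without a lock. Plain `go test` may pass, but
// a run with -race (as the go-test target configures) would flag it.
func TestUnsynchronizedCounter(t *testing.T) {
	var counter int
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // racy write reported by the race detector
		}()
	}
	wg.Wait()
	t.Logf("final counter: %d", counter)
}
```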
							
								
								
									
131  vendor/github.com/vmware/govmomi/README.md  (generated, vendored)
| @@ -1,131 +0,0 @@ | ||||
| <!-- markdownlint-disable first-line-h1 no-inline-html --> | ||||
|  | ||||
| <!-- badges: [ci-build] [ci-tests] [go-report-card] [latest-release] [go-reference] [go-version] --> | ||||
|  | ||||
| # govmomi | ||||
|  | ||||
| A Go library for interacting with VMware vSphere APIs (ESXi and/or vCenter Server). | ||||
|  | ||||
| In addition to the vSphere API client, this repository includes: | ||||
|  | ||||
| * [govc][govc] - vSphere CLI | ||||
| * [vcsim][vcsim] - vSphere API mock framework | ||||
| * [toolbox][toolbox] - VM guest tools framework | ||||
|  | ||||
| ## Compatibility | ||||
|  | ||||
| This library supports vCenter Server and ESXi versions following the [VMware Product Lifecycle Matrix][reference-lifecycle]. | ||||
|  | ||||
| Product versions that are end of support may work, but are not officially supported. | ||||
|  | ||||
| ## Documentation | ||||
|  | ||||
| The APIs exposed by this library closely follow the API described in the [VMware vSphere API Reference Documentation][reference-api]. Refer to the documentation to become familiar with the upstream API. | ||||
|  | ||||
| The code in the `govmomi` package is a wrapper for the code that is generated from the vSphere API description. It primarily provides convenience functions for working with the vSphere API. See [godoc.org][reference-godoc] for documentation. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| ### govmomi (Package) | ||||
|  | ||||
| ```bash | ||||
| go get -u github.com/vmware/govmomi | ||||
| ``` | ||||
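If a specific release is preferred over the latest commit, the module can also be pinned to a tag (a minimal sketch; `v0.29.0` is just an example version taken from this document):

```bash
go get github.com/vmware/govmomi@v0.29.0
```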
|  | ||||
| ### Binaries and Docker Images for `govc` and `vcsim` | ||||
|  | ||||
| Installation instructions, released binaries, and Docker images are documented in the respective README files of [`govc`][govc] and [`vcsim`][vcsim]. | ||||
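One common route, assuming a recent Go toolchain (the authoritative instructions live in the linked READMEs), is installing the CLIs from source:

```bash
go install github.com/vmware/govmomi/govc@latest
go install github.com/vmware/govmomi/vcsim@latest
```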
|  | ||||
| ## Discussion | ||||
|  | ||||
| The project encourages the community to collaborate using GitHub [issues][govmomi-github-issues], GitHub [discussions][govmomi-github-discussions], and [Slack][slack-channel]. | ||||
|  | ||||
| > **Note** | ||||
| > Access to Slack requires a free [VMware {code}][slack-join] developer program membership. | ||||
|  | ||||
| ## Status | ||||
|  | ||||
| Changes to the API are subject to [semantic versioning][reference-semver]. | ||||
|  | ||||
| Refer to the [CHANGELOG][govmomi-changelog] for version to version changes. | ||||
|  | ||||
| ## Notable Projects Using govmomi | ||||
|  | ||||
| * [collectd-vsphere][project-travisci-collectd-vsphere] | ||||
| * [Docker LinuxKit][project-docker-linuxKit] | ||||
| * [Elastic Agent VMware vSphere integration][project-elastic-agent] | ||||
| * [Gru][project-gru] | ||||
| * [Juju][project-juju] | ||||
| * [Jupiter Brain][project-travisci-jupiter-brain] | ||||
| * [Kubernetes vSphere Cloud Provider][project-k8s-cloud-provider] | ||||
| * [Kubernetes Cluster API][project-k8s-cluster-api] | ||||
| * [OPS][project-nanovms-ops] | ||||
| * [Packer Plugin for VMware vSphere][project-hashicorp-packer-plugin-vsphere] | ||||
| * [Rancher][project-rancher] | ||||
| * [Terraform Provider for VMware vSphere][project-hashicorp-terraform-provider-vsphere] | ||||
| * [Telegraf][project-influxdata-telegraf] | ||||
| * [VMware Event Broker Appliance][project-vmware-veba] | ||||
| * [VMware vSphere Integrated Containers Engine][project-vmware-vic] | ||||
| * [VMware vSphere 7.0][project-vmware-vsphere] | ||||
|  | ||||
| ## Related Projects | ||||
|  | ||||
| * [go-vmware-nsxt][reference-go-vmware-nsxt] | ||||
| * [pyvmomi][reference-pyvmomi] | ||||
| * [rbvmomi][reference-rbvmomi] | ||||
|  | ||||
| ## License | ||||
|  | ||||
| govmomi is available under the [Apache 2 License][govmomi-license]. | ||||
|  | ||||
| ## Name | ||||
|  | ||||
| Pronounced: _go·v·mom·ie_ | ||||
|  | ||||
| Follows pyvmomi and rbvmomi: language prefix + the vSphere acronym "VM Object Management Infrastructure". | ||||
|  | ||||
| [//]: # (Links) | ||||
|  | ||||
| [ci-build]: https://github.com/vmware/govmomi/actions/workflows/govmomi-build.yaml | ||||
| [ci-tests]: https://github.com/vmware/govmomi/actions/workflows/govmomi-go-tests.yaml | ||||
| [latest-release]: https://github.com/vmware/govmomi/releases/latest | ||||
| [govc]: govc/README.md | ||||
| [govmomi-github-issues]: https://github.com/vmware/govmomi/issues | ||||
| [govmomi-github-discussions]: https://github.com/vmware/govmomi/discussions | ||||
| [govmomi-changelog]: CHANGELOG.md | ||||
| [govmomi-license]: LICENSE.txt | ||||
| [go-reference]: https://pkg.go.dev/github.com/vmware/govmomi | ||||
| [go-report-card]: https://goreportcard.com/report/github.com/vmware/govmomi | ||||
| [go-version]: https://github.com/vmware/govmomi | ||||
| [project-docker-linuxKit]: https://github.com/linuxkit/linuxkit/tree/master/src/cmd/linuxkit | ||||
| [project-elastic-agent]: https://github.com/elastic/integrations/tree/main/packages/vsphere | ||||
| [project-gru]: https://github.com/dnaeon/gru | ||||
| [project-hashicorp-packer-plugin-vsphere]: https://github.com/hashicorp/packer-plugin-vsphere | ||||
| [project-hashicorp-terraform-provider-vsphere]: https://github.com/hashicorp/terraform-provider-vsphere | ||||
| [project-influxdata-telegraf]: https://github.com/influxdata/telegraf/tree/master/plugins/inputs/vsphere | ||||
| [project-juju]: https://github.com/juju/juju | ||||
| [project-k8s-cloud-provider]: https://github.com/kubernetes/cloud-provider-vsphere | ||||
| [project-k8s-cluster-api]: https://github.com/kubernetes-sigs/cluster-api-provider-vsphere | ||||
| [project-nanovms-ops]: https://github.com/nanovms/ops | ||||
| [project-rancher]: https://github.com/rancher/rancher/blob/master/pkg/api/norman/customization/vsphere/listers.go | ||||
| [project-travisci-collectd-vsphere]: https://github.com/travis-ci/collectd-vsphere | ||||
| [project-travisci-jupiter-brain]: https://github.com/travis-ci/jupiter-brain | ||||
| [project-vmware-veba]: https://github.com/vmware-samples/vcenter-event-broker-appliance/tree/development/vmware-event-router | ||||
| [project-vmware-vic]: https://github.com/vmware/vic | ||||
| [project-vmware-vsphere]: https://docs.vmware.com/en/VMware-vSphere/7.0/rn/vsphere-esxi-vcenter-server-7-vsphere-with-kubernetes-release-notes.html | ||||
| [reference-api]: https://developer.vmware.com/apis/968/vsphere | ||||
| [reference-godoc]: http://godoc.org/github.com/vmware/govmomi | ||||
| [reference-go-vmware-nsxt]: https://github.com/vmware/go-vmware-nsxt | ||||
| [reference-lifecycle]: https://lifecycle.vmware.com | ||||
| [reference-pyvmomi]: https://github.com/vmware/pyvmomi | ||||
| [reference-rbvmomi]: https://github.com/vmware/rbvmomi | ||||
| [reference-semver]: http://semver.org | ||||
| [slack-join]: https://developer.vmware.com/join/ | ||||
| [slack-channel]: https://vmwarecode.slack.com/messages/govmomi | ||||
| [toolbox]: toolbox/README.md | ||||
| [vcsim]: vcsim/README.md | ||||
							
								
								
									
225 vendor/github.com/vmware/govmomi/RELEASE.md generated vendored
| @@ -1,225 +0,0 @@ | ||||
| # How to create a `govmomi` Release on Github | ||||
|  | ||||
| > **Note**  | ||||
| > | ||||
| > The steps outlined in this document can only be performed by maintainers or | ||||
| > administrators of this project. | ||||
|  | ||||
| The release automation is based on Github | ||||
| [Actions](https://github.com/features/actions) and has been improved over time | ||||
| to simplify the experience for creating `govmomi` releases. | ||||
|  | ||||
| The Github Actions release [workflow](.github/workflows/govmomi-release.yaml) | ||||
| uses [`goreleaser`](http://goreleaser.com/) and automatically creates/pushes: | ||||
|  | ||||
| - Release artifacts for `govc` and `vcsim` to the | ||||
|   [release](https://github.com/vmware/govmomi/releases) page, including | ||||
|   `LICENSE.txt`, `README` and `CHANGELOG` | ||||
| - Docker images for `vmware/govc` and `vmware/vcsim` to Docker Hub | ||||
| - Source code | ||||
|  | ||||
| Starting with release tag `v0.29.0`, releases are no longer tagged on the | ||||
| `master` branch but on a dedicated release branch, for example `release-0.29`. | ||||
| This process was already followed for patch releases and back-ports. | ||||
|  | ||||
| > **Warning**  | ||||
| > | ||||
| > If you create a release after the `v0.29.0` tag, start | ||||
| > [here](#creating-a-release-after-v0290). To create a release with an older | ||||
| > tag, e.g. a cherry-pick or back-port, continue | ||||
| > [here](#creating-a-release-before-v0290). | ||||
|  | ||||
| ## Creating a release after Version `v0.29.0` | ||||
|  | ||||
| The release process as of `v0.29.0` has been further simplified and is done | ||||
| through the Github UI. The only prerequisite is creating a release branch, | ||||
| which can be done through the Github UI or the `git` CLI. | ||||
|  | ||||
| This guide describes the CLI process. | ||||
|  | ||||
| ### Verify `master` branch is up to date with the remote | ||||
|  | ||||
| ```console | ||||
| git checkout master | ||||
| git fetch -avp | ||||
| git diff master origin/master | ||||
|  | ||||
| # if your local and remote branches diverge run | ||||
| git pull origin master | ||||
| ``` | ||||
|  | ||||
| > **Warning**  | ||||
| > | ||||
| > These steps assume `origin` points to the remote | ||||
| > `https://github.com/vmware/govmomi` (or | ||||
| > `git@github.com:vmware/govmomi`). | ||||
|  | ||||
| ### Create a release branch | ||||
|  | ||||
| For new releases, create a release branch from the most recent commit in | ||||
| `master`, e.g. `release-0.30`. | ||||
|  | ||||
| ```console | ||||
| export RELEASE_BRANCH=release-0.30 | ||||
| git checkout -b ${RELEASE_BRANCH} | ||||
| ``` | ||||
|  | ||||
| For maintenance/patch releases on **existing** release branches **after** tag | ||||
| `v0.29.0`, simply check out the existing release branch and add commits to it, | ||||
| as sketched below. | ||||
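A minimal sketch of that flow (the branch name is only an example):

```console
git checkout release-0.29
git pull origin release-0.29
# add/cherry-pick commits, then continue with the steps below
```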
|  | ||||
| ### Verify `make doc` and `CONTRIBUTORS` are up to date | ||||
|  | ||||
| > **Warning** | ||||
| >  | ||||
| > Run the following commands and commit any changes to the release branch before | ||||
| > proceeding with the release. | ||||
|  | ||||
| ```console | ||||
| make doc | ||||
| ./scripts/contributors.sh | ||||
| if [ -z "$(git status --porcelain)" ]; then  | ||||
|   echo "working directory clean: proceed with release" | ||||
| else  | ||||
|   echo "working directory dirty: please commit changes" | ||||
| fi | ||||
|  | ||||
| # perform git add && git commit ... in case there were changes | ||||
| ``` | ||||
|  | ||||
| ### Push the release branch | ||||
|  | ||||
| > **Warning** | ||||
| > | ||||
| > Do not create a tag as this will be done by the release automation. | ||||
|  | ||||
| The final step is pushing the new/updated release branch.  | ||||
|  | ||||
| ```console | ||||
| git push origin ${RELEASE_BRANCH} | ||||
| ``` | ||||
|  | ||||
| ### Create a release in the Github UI | ||||
|  | ||||
| Open the `govmomi` Github [repository](https://github.com/vmware/govmomi) and | ||||
| navigate to `Actions -> Workflows -> Release`. | ||||
|  | ||||
| Click `Run Workflow`, which opens a dropdown list. | ||||
|  | ||||
| Select the new/updated branch, e.g. `release-0.30`, i.e. **not** the `master` | ||||
| branch. | ||||
|  | ||||
| Specify a semantic `tag` to associate with the release, e.g. `v0.30.0`.  | ||||
|  | ||||
| > **Warning** | ||||
| > | ||||
| > This tag **must not** exist or the release will fail during the validation | ||||
| > phase. | ||||
|  | ||||
| By default, a dry-run is performed to rule out most (but not all) errors during | ||||
| a release. To skip the dry-run and actually create the release, deselect the | ||||
| `Verify release workflow ...` checkbox. | ||||
|  | ||||
| Click `Run Workflow` to kick off the workflow. | ||||
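For maintainers who prefer a terminal, the same workflow dispatch can also be triggered with the GitHub CLI. The workflow file name comes from this document, but the input names (`tag`, any dry-run flag) are assumptions and should be checked against `.github/workflows/govmomi-release.yaml` before use:

```console
# hypothetical input names; verify them in the workflow file first
gh workflow run govmomi-release.yaml --ref release-0.30 -f tag=v0.30.0
```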
|  | ||||
| After successful completion and if the newly created `tag` is the **latest** | ||||
| (semantic version sorted) tag in the repository, a PR is automatically opened | ||||
| against the `master` branch to update the `CHANGELOG`. Please review and merge | ||||
| accordingly. | ||||
|  | ||||
| ## Creating a release before Version `v0.29.0` | ||||
|  | ||||
| The release process before `v0.29.0` differs because it is based on manually | ||||
| creating and pushing tags. Whenever a new tag matching `v*` is pushed to the | ||||
| repository, a Github Actions release workflow is executed. | ||||
|  | ||||
| ### Verify `master` branch is up to date with the remote | ||||
|  | ||||
| ```console | ||||
| git checkout master | ||||
| git fetch -avp | ||||
| git diff master origin/master | ||||
|  | ||||
| # if your local and remote branches diverge run | ||||
| git pull origin master | ||||
| ``` | ||||
|  | ||||
| > **Warning**  | ||||
| > | ||||
| > These steps assume `origin` points to the remote | ||||
| > `https://github.com/vmware/govmomi` (or | ||||
| > `git@github.com:vmware/govmomi`). | ||||
|  | ||||
| ### Create a release branch | ||||
|  | ||||
| Pick a reference (commit, branch or tag) **before** the `v0.29.0` tag and create | ||||
| a release branch from there. | ||||
|  | ||||
| The following example creates a cherry-pick release (`v0.28.1`) based on the | ||||
| `v0.28.0` tag. | ||||
|  | ||||
| ```console | ||||
| export RELEASE_BRANCH=release-0.28 | ||||
| git checkout -b ${RELEASE_BRANCH} v0.28.0 | ||||
| ``` | ||||
|  | ||||
| Optionally, incorporate (cherry-pick) commits into the branch.  | ||||
|  | ||||
| > **Warning**  | ||||
| > | ||||
| > Make sure that these commits/ranges do not include commits made after the | ||||
| > `v0.29.0` tag that change the release automation, i.e. files in | ||||
| > `.github/workflows/`! | ||||
|  | ||||
| ### Verify `make doc` and `CONTRIBUTORS` are up to date | ||||
|  | ||||
| > **Warning** | ||||
| >  | ||||
| > Run the following commands and commit any changes to the release branch before | ||||
| > proceeding with the release. | ||||
|  | ||||
| ```console | ||||
| make doc | ||||
| ./scripts/contributors.sh | ||||
| if [ -z "$(git status --porcelain)" ]; then  | ||||
|   echo "working directory clean: proceed with release" | ||||
| else  | ||||
|   echo "working directory dirty: please commit changes" | ||||
| fi | ||||
|  | ||||
| # perform git add && git commit ... in case there were changes | ||||
| ``` | ||||
|  | ||||
| ### Set `RELEASE_VERSION` variable | ||||
|  | ||||
| This variable is used and referenced in the subsequent commands. Set it to the | ||||
| **upcoming** release version, adhering to the [semantic | ||||
| versioning](https://semver.org/) scheme: | ||||
|  | ||||
| ```console | ||||
| export RELEASE_VERSION=v0.28.1 | ||||
| ``` | ||||
|  | ||||
| ### Create the Git Tag | ||||
|  | ||||
| ```console | ||||
| git tag -a ${RELEASE_VERSION} -m "Release ${RELEASE_VERSION}" | ||||
| ``` | ||||
|  | ||||
| ### Push the new Tag | ||||
|  | ||||
| ```console | ||||
| # Will trigger Github Actions Release Workflow | ||||
| git push --atomic origin ${RELEASE_BRANCH} refs/tags/${RELEASE_VERSION} | ||||
| ``` | ||||
|  | ||||
| ### Verify Github Action Release Workflow | ||||
|  | ||||
| After pushing a new release tag, the status of the workflow can be inspected | ||||
| [here](https://github.com/vmware/govmomi/actions/workflows/govmomi-release.yaml). | ||||
|  | ||||
|  | ||||
|  | ||||
| After a successful release, a pull request is automatically created by the | ||||
| Github Actions bot to update the [CHANGELOG](CHANGELOG.md). This `CHANGELOG.md` | ||||
| is also generated with `git-chglog` but uses a slightly different template | ||||
| (`.chglog/CHANGELOG.tpl.md`) for rendering (issue/PR refs are excluded). | ||||
							
								
								
									
136 vendor/github.com/vmware/govmomi/client.go generated vendored
| @@ -1,136 +0,0 @@ | ||||
| /* | ||||
| Copyright (c) 2014-2016 VMware, Inc. All Rights Reserved. | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|     http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| /* | ||||
| This package is the root package of the govmomi library. | ||||
|  | ||||
| The library is structured as follows: | ||||
|  | ||||
| Package vim25 | ||||
|  | ||||
| The minimal usable functionality is available through the vim25 package. | ||||
| It contains subpackages that contain generated types, managed objects, and all | ||||
| available methods. The vim25 package is entirely independent of the other | ||||
| packages in the govmomi tree -- it has no dependencies on its peers. | ||||
|  | ||||
| The vim25 package itself contains a client structure that is | ||||
| passed around throughout the entire library. It abstracts a session and its | ||||
| immutable state. See the vim25 package for more information. | ||||
|  | ||||
| Package session | ||||
|  | ||||
| The session package contains an abstraction for the session manager that allows | ||||
| a user to log in and log out. It also provides access to the current session | ||||
| (i.e. to determine whether the user is in fact logged in). | ||||
|  | ||||
| Package object | ||||
|  | ||||
| The object package contains wrappers for a selection of managed objects. The | ||||
| constructors of these objects all take a *vim25.Client, which they pass along | ||||
| to derived objects, if applicable. | ||||
|  | ||||
| Package govc | ||||
|  | ||||
| The govc package contains the govc CLI. The code in this tree is not intended | ||||
| to be used as a library. Any functionality that govc contains that _could_ be | ||||
| used as a library function but isn't, _should_ live in a root level package. | ||||
|  | ||||
| Other packages | ||||
|  | ||||
| Other packages, such as "event", "guest", or "license", provide wrappers for | ||||
| the respective subsystems. They are typically not needed in normal workflows so | ||||
| are kept outside the object package. | ||||
| */ | ||||
| package govmomi | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net/url" | ||||
|  | ||||
| 	"github.com/vmware/govmomi/property" | ||||
| 	"github.com/vmware/govmomi/session" | ||||
| 	"github.com/vmware/govmomi/vim25" | ||||
| 	"github.com/vmware/govmomi/vim25/soap" | ||||
| 	"github.com/vmware/govmomi/vim25/types" | ||||
| ) | ||||
|  | ||||
| type Client struct { | ||||
| 	*vim25.Client | ||||
|  | ||||
| 	SessionManager *session.Manager | ||||
| } | ||||
|  | ||||
| // NewClient creates a new client from a URL. The client authenticates with the | ||||
| // server with username/password before returning if the URL contains user information. | ||||
| func NewClient(ctx context.Context, u *url.URL, insecure bool) (*Client, error) { | ||||
| 	soapClient := soap.NewClient(u, insecure) | ||||
| 	vimClient, err := vim25.NewClient(ctx, soapClient) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	c := &Client{ | ||||
| 		Client:         vimClient, | ||||
| 		SessionManager: session.NewManager(vimClient), | ||||
| 	} | ||||
|  | ||||
| 	// Only login if the URL contains user information. | ||||
| 	if u.User != nil { | ||||
| 		err = c.Login(ctx, u.User) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return c, nil | ||||
| } | ||||
|  | ||||
| // Login dispatches to the SessionManager. | ||||
| func (c *Client) Login(ctx context.Context, u *url.Userinfo) error { | ||||
| 	return c.SessionManager.Login(ctx, u) | ||||
| } | ||||
|  | ||||
| // Logout dispatches to the SessionManager. | ||||
| func (c *Client) Logout(ctx context.Context) error { | ||||
| 	// Close any idle connections after logging out. | ||||
| 	defer c.Client.CloseIdleConnections() | ||||
| 	return c.SessionManager.Logout(ctx) | ||||
| } | ||||
|  | ||||
| // PropertyCollector returns the session's default property collector. | ||||
| func (c *Client) PropertyCollector() *property.Collector { | ||||
| 	return property.DefaultCollector(c.Client) | ||||
| } | ||||
|  | ||||
| // RetrieveOne dispatches to the Retrieve function on the default property collector. | ||||
| func (c *Client) RetrieveOne(ctx context.Context, obj types.ManagedObjectReference, p []string, dst interface{}) error { | ||||
| 	return c.PropertyCollector().RetrieveOne(ctx, obj, p, dst) | ||||
| } | ||||
|  | ||||
| // Retrieve dispatches to the Retrieve function on the default property collector. | ||||
| func (c *Client) Retrieve(ctx context.Context, objs []types.ManagedObjectReference, p []string, dst interface{}) error { | ||||
| 	return c.PropertyCollector().Retrieve(ctx, objs, p, dst) | ||||
| } | ||||
|  | ||||
| // Wait dispatches to property.Wait. | ||||
| func (c *Client) Wait(ctx context.Context, obj types.ManagedObjectReference, ps []string, f func([]types.PropertyChange) bool) error { | ||||
| 	return property.Wait(ctx, c.PropertyCollector(), obj, ps, f) | ||||
| } | ||||
|  | ||||
| // IsVC returns true if we are connected to a vCenter | ||||
| func (c *Client) IsVC() bool { | ||||
| 	return c.Client.IsVC() | ||||
| } | ||||
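As an editorial illustration of the wrapper removed above (not part of the vendored file), a minimal session against a placeholder vCenter URL might look like the following; it uses only the methods shown in `client.go` plus fields of the embedded `vim25.Client`:

```go
package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/vmware/govmomi"
	"github.com/vmware/govmomi/vim25/mo"
)

func main() {
	ctx := context.Background()

	// Placeholder endpoint; user info in the URL triggers the automatic Login in NewClient.
	u, err := url.Parse("https://user:pass@vcenter.example.com/sdk")
	if err != nil {
		panic(err)
	}

	// The final argument mirrors the insecure parameter above (skip TLS verification).
	c, err := govmomi.NewClient(ctx, u, true)
	if err != nil {
		panic(err)
	}
	defer c.Logout(ctx)

	fmt.Println("connected to vCenter:", c.IsVC())

	// Retrieve a property of the root folder via the default property collector.
	var folder mo.Folder
	if err := c.RetrieveOne(ctx, c.ServiceContent.RootFolder, []string{"name"}, &folder); err != nil {
		panic(err)
	}
	fmt.Println("root folder:", folder.Name)
}
```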
Some files were not shown because too many files have changed in this diff.