diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 4b9123dd3bb..cbe5e73a236 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -38,6 +38,11 @@ "Comment": "v10.0.4-beta-1-g786cc84", "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" }, + { + "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk", + "Comment": "v10.0.4-beta-1-g786cc84", + "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad" + }, { "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network", "Comment": "v10.0.4-beta-1-g786cc84", @@ -55,11 +60,11 @@ }, { "ImportPath": "github.com/Azure/go-ansiterm", - "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e" + "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe" }, { "ImportPath": "github.com/Azure/go-ansiterm/winterm", - "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e" + "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe" }, { "ImportPath": "github.com/Azure/go-autorest/autorest", diff --git a/Godeps/LICENSES b/Godeps/LICENSES index e6f33afd3ef..e43083b177e 100644 --- a/Godeps/LICENSES +++ b/Godeps/LICENSES @@ -8952,6 +8952,216 @@ SOFTWARE. ================================================================================ +================================================================================ += vendor/github.com/Azure/azure-sdk-for-go/arm/disk licensed under: = + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + += vendor/github.com/Azure/azure-sdk-for-go/LICENSE cce6fd055830ca30ff78fdf077e870d6 - +================================================================================ + + ================================================================================ = vendor/github.com/Azure/azure-sdk-for-go/arm/network licensed under: = diff --git a/api/OWNERS b/api/OWNERS index 2a2b6751eed..6d85ec75f9b 100644 --- a/api/OWNERS +++ b/api/OWNERS @@ -17,7 +17,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/build/root/Makefile b/build/root/Makefile index 401b8c44f45..451716b938d 100644 --- a/build/root/Makefile +++ b/build/root/Makefile @@ -523,6 +523,20 @@ bazel-test: bazel test --test_tag_filters=-integration --flaky_test_attempts=3 //cmd/... //pkg/... //federation/... //plugin/... //third_party/... //hack/... //hack:verify-all //vendor/k8s.io/... 
endif +ifeq ($(PRINT_HELP),y) +define BAZEL_TEST_INTEGRATION_HELP_INFO +# Integration test with bazel +# +# Example: +# make bazel-test-integration +endef +bazel-test-integration: + @echo "$$BAZEL_TEST_INTEGRATION_HELP_INFO" +else +bazel-test-integration: + bazel test //test/integration/... +endif + ifeq ($(PRINT_HELP),y) define BAZEL_BUILD_HELP_INFO # Build release tars with bazel diff --git a/build/root/WORKSPACE b/build/root/WORKSPACE index e411ef563fe..e7f23f182c5 100644 --- a/build/root/WORKSPACE +++ b/build/root/WORKSPACE @@ -12,6 +12,16 @@ http_archive( urls = ["https://github.com/kubernetes/repo-infra/archive/9dedd5f4093884c133ad5ea73695b28338b954ab.tar.gz"], ) +ETCD_VERSION = "3.0.17" + +new_http_archive( + name = "com_coreos_etcd", + build_file = "third_party/etcd.BUILD", + sha256 = "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b", + strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION, + urls = ["https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)], +) + # This contains a patch to not prepend ./ to tarfiles produced by pkg_tar. # When merged upstream, we'll no longer need to use ixdy's fork: # https://bazel-review.googlesource.com/#/c/10390/ diff --git a/cluster/vagrant/OWNERS b/cluster/vagrant/OWNERS index 5415a641596..f90049369f8 100644 --- a/cluster/vagrant/OWNERS +++ b/cluster/vagrant/OWNERS @@ -35,4 +35,3 @@ reviewers: - k82cn - caseydavenport - johscheuer -- rjnagal diff --git a/cmd/kube-controller-manager/OWNERS b/cmd/kube-controller-manager/OWNERS index 9bb16d17821..03b1a794896 100644 --- a/cmd/kube-controller-manager/OWNERS +++ b/cmd/kube-controller-manager/OWNERS @@ -5,7 +5,6 @@ approvers: reviewers: - '249043822' - a-robinson -- bprashanth - brendandburns - caesarxuchao - cjcullen diff --git a/cmd/kubeadm/app/cmd/init.go b/cmd/kubeadm/app/cmd/init.go index 38587ca0953..51e45c6dc29 100644 --- a/cmd/kubeadm/app/cmd/init.go +++ b/cmd/kubeadm/app/cmd/init.go @@ -87,6 +87,12 @@ func NewCmdInit(out io.Writer) *cobra.Command { i, err := NewInit(cfgPath, internalcfg, skipPreFlight, skipTokenPrint) kubeadmutil.CheckErr(err) kubeadmutil.CheckErr(i.Validate(cmd)) + + // TODO: remove this warning in 1.9 + if !cmd.Flags().Lookup("token-ttl").Changed { + fmt.Println("[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)") + } + kubeadmutil.CheckErr(i.Run(out)) }, } diff --git a/cmd/kubeadm/app/cmd/token.go b/cmd/kubeadm/app/cmd/token.go index e496709b884..7e542f54644 100644 --- a/cmd/kubeadm/app/cmd/token.go +++ b/cmd/kubeadm/app/cmd/token.go @@ -109,6 +109,12 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command { client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile) kubeadmutil.CheckErr(err) + // TODO: remove this warning in 1.9 + if !tokenCmd.Flags().Lookup("ttl").Changed { + // send this output to stderr + fmt.Fprintln(errW, "[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --ttl 0)") + } + err = RunCreateToken(out, client, token, tokenDuration, usages, description) kubeadmutil.CheckErr(err) }, diff --git a/cmd/kubeadm/app/constants/constants.go b/cmd/kubeadm/app/constants/constants.go index 8a6ef8a2f4f..98c43e4a150 100644 --- a/cmd/kubeadm/app/constants/constants.go +++ b/cmd/kubeadm/app/constants/constants.go @@ -84,8 +84,8 @@ const ( MinimumAddressesInServiceSubnet = 10 // DefaultTokenDuration specifies the default amount of
time that a bootstrap token will be valid - // Default behaviour is "never expire" == 0 - DefaultTokenDuration = 0 + // Default behaviour is 24 hours + DefaultTokenDuration = 24 * time.Hour // LabelNodeRoleMaster specifies that a node is a master // It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112 diff --git a/examples/podsecuritypolicy/rbac/bindings.yaml b/examples/podsecuritypolicy/rbac/bindings.yaml index b07f99ee21b..13b8ac3c4ac 100644 --- a/examples/podsecuritypolicy/rbac/bindings.yaml +++ b/examples/podsecuritypolicy/rbac/bindings.yaml @@ -31,7 +31,8 @@ roleRef: kind: ClusterRole name: restricted-psp-user --- -# edit grants edit role to system:authenticated. +# edit grants edit role to the groups +# restricted and privileged. apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: diff --git a/examples/volumes/flexvolume/nfs b/examples/volumes/flexvolume/nfs index 31254506327..4d0977cec87 100755 --- a/examples/volumes/flexvolume/nfs +++ b/examples/volumes/flexvolume/nfs @@ -48,7 +48,7 @@ domount() { SHARE=$(echo $2 | jq -r '.share') if [ $(ismounted) -eq 1 ] ; then - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 fi @@ -59,14 +59,14 @@ domount() { err "{ \"status\": \"Failure\", \"message\": \"Failed to mount ${NFS_SERVER}:${SHARE} at ${MNTPATH}\"}" exit 1 fi - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 } unmount() { MNTPATH=$1 if [ $(ismounted) -eq 0 ] ; then - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 fi @@ -76,14 +76,19 @@ unmount() { exit 1 fi - log "{\"status\": \"Success\"}" + log '{"status": "Success"}' exit 0 } op=$1 +if ! command -v jq >/dev/null 2>&1; then + err "{ \"status\": \"Failure\", \"message\": \"'jq' binary not found. 
Please install the jq package before using this driver\"}" + exit 1 +fi + if [ "$op" = "init" ]; then - log "{\"status\": \"Success\", \"capabilities\": {\"attach\": false}}" + log '{"status": "Success", "capabilities": {"attach": false}}' exit 0 fi @@ -101,7 +106,7 @@ case "$op" in unmount $* ;; *) - log "{ \"status\": \"Not supported\" }" + log '{"status": "Not supported"}' exit 0 esac diff --git a/hack/.linted_packages b/hack/.linted_packages index 970aab80cd8..7ab9eb282ff 100644 --- a/hack/.linted_packages +++ b/hack/.linted_packages @@ -205,6 +205,7 @@ pkg/controller/volume/attachdetach/util pkg/conversion pkg/conversion/queryparams pkg/credentialprovider/aws +pkg/credentialprovider/azure pkg/fieldpath pkg/fields pkg/hyperkube diff --git a/hack/verify-flags/excluded-flags.txt b/hack/verify-flags/excluded-flags.txt index fc1e86d0e8d..7b6cd4e4cce 100644 --- a/hack/verify-flags/excluded-flags.txt +++ b/hack/verify-flags/excluded-flags.txt @@ -24,3 +24,4 @@ valid_flag retry_time file_content_in_loop break_on_expected_content +Premium_LRS diff --git a/pkg/api/OWNERS b/pkg/api/OWNERS index 2a2b6751eed..6d85ec75f9b 100644 --- a/pkg/api/OWNERS +++ b/pkg/api/OWNERS @@ -17,7 +17,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/pkg/api/testapi/OWNERS b/pkg/api/testapi/OWNERS index 15d3ffea5e9..ede98b35226 100755 --- a/pkg/api/testapi/OWNERS +++ b/pkg/api/testapi/OWNERS @@ -8,7 +8,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - erictune - tallclair - eparis diff --git a/pkg/api/testing/OWNERS b/pkg/api/testing/OWNERS index e8571984a24..2ee0a449ca9 100755 --- a/pkg/api/testing/OWNERS +++ b/pkg/api/testing/OWNERS @@ -11,7 +11,6 @@ reviewers: - vishh - mikedanese - nikhiljindal -- bprashanth - erictune - pmorie - dchen1107 diff --git a/pkg/api/v1/OWNERS b/pkg/api/v1/OWNERS index ff40f221a2e..66568527f93 100755 --- a/pkg/api/v1/OWNERS +++ b/pkg/api/v1/OWNERS @@ -12,7 +12,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/pkg/api/validation/OWNERS b/pkg/api/validation/OWNERS index 68d4f661612..66dfd87d3b4 100755 --- a/pkg/api/validation/OWNERS +++ b/pkg/api/validation/OWNERS @@ -12,7 +12,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp @@ -37,4 +36,3 @@ reviewers: - krousey - rootfs - markturansky -- vmarmol diff --git a/pkg/apis/OWNERS b/pkg/apis/OWNERS index 5a5a94c5421..66878903a22 100644 --- a/pkg/apis/OWNERS +++ b/pkg/apis/OWNERS @@ -16,7 +16,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - pmorie diff --git a/pkg/apis/apps/OWNERS b/pkg/apis/apps/OWNERS index c98ebb6b4e7..e06ff4c481e 100755 --- a/pkg/apis/apps/OWNERS +++ b/pkg/apis/apps/OWNERS @@ -4,7 +4,6 @@ reviewers: - smarterclayton - deads2k - caesarxuchao -- bprashanth - pmorie - sttts - saad-ali diff --git a/pkg/apis/componentconfig/OWNERS b/pkg/apis/componentconfig/OWNERS index a644a39fcd7..16e6aa37c33 100755 --- a/pkg/apis/componentconfig/OWNERS +++ b/pkg/apis/componentconfig/OWNERS @@ -11,7 +11,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - sttts - dchen1107 diff --git a/pkg/apis/extensions/OWNERS b/pkg/apis/extensions/OWNERS index 8350a5a66ae..29a40fb54ee 100755 --- a/pkg/apis/extensions/OWNERS +++ b/pkg/apis/extensions/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - erictune - pmorie - sttts diff --git a/pkg/client/OWNERS
b/pkg/client/OWNERS index c8472a94f2e..0f192509905 100644 --- a/pkg/client/OWNERS +++ b/pkg/client/OWNERS @@ -18,7 +18,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/pkg/client/clientset_generated/clientset/BUILD b/pkg/client/clientset_generated/clientset/BUILD index c35ae87d0ce..a922681e791 100644 --- a/pkg/client/clientset_generated/clientset/BUILD +++ b/pkg/client/clientset_generated/clientset/BUILD @@ -12,6 +12,7 @@ go_library( srcs = [ "clientset.go", "doc.go", + "import.go", "import_known_versions.go", ], tags = ["automanaged"], diff --git a/pkg/client/clientset_generated/clientset/import.go b/pkg/client/clientset_generated/clientset/import.go new file mode 100644 index 00000000000..0dcf5f0b90b --- /dev/null +++ b/pkg/client/clientset_generated/clientset/import.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file exists to enforce this clientset's vanity import path. + +package clientset // import "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" diff --git a/pkg/client/unversioned/OWNERS b/pkg/client/unversioned/OWNERS index d24f87183c1..33d109321c4 100755 --- a/pkg/client/unversioned/OWNERS +++ b/pkg/client/unversioned/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - ixdy - gmarek - erictune diff --git a/pkg/cloudprovider/OWNERS b/pkg/cloudprovider/OWNERS index 657fe57964b..5abbe51cb2c 100644 --- a/pkg/cloudprovider/OWNERS +++ b/pkg/cloudprovider/OWNERS @@ -12,7 +12,6 @@ reviewers: - vishh - mikedanese - liggitt -- bprashanth - gmarek - erictune - davidopp @@ -34,7 +33,6 @@ reviewers: - rootfs - jszczepkowski - markturansky -- vmarmol - girishkalele - satnam6502 - jdef diff --git a/pkg/cloudprovider/providers/azure/BUILD b/pkg/cloudprovider/providers/azure/BUILD index 925dc350986..2dc4b7b20e8 100644 --- a/pkg/cloudprovider/providers/azure/BUILD +++ b/pkg/cloudprovider/providers/azure/BUILD @@ -13,18 +13,19 @@ go_library( srcs = [ "azure.go", "azure_backoff.go", - "azure_blob.go", + "azure_blobDiskController.go", + "azure_controllerCommon.go", "azure_file.go", "azure_instance_metadata.go", "azure_instances.go", "azure_loadbalancer.go", + "azure_managedDiskController.go", "azure_routes.go", "azure_storage.go", "azure_storageaccount.go", "azure_util.go", "azure_wrap.go", "azure_zones.go", - "vhd.go", ], tags = ["automanaged"], deps = [ @@ -34,6 +35,7 @@ go_library( "//pkg/version:go_default_library", "//pkg/volume:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library", diff --git a/pkg/cloudprovider/providers/azure/azure.go b/pkg/cloudprovider/providers/azure/azure.go index 
1ec9db11b31..8ae82707ded 100644 --- a/pkg/cloudprovider/providers/azure/azure.go +++ b/pkg/cloudprovider/providers/azure/azure.go @@ -30,6 +30,7 @@ import ( "k8s.io/kubernetes/pkg/version" "github.com/Azure/azure-sdk-for-go/arm/compute" + "github.com/Azure/azure-sdk-for-go/arm/disk" "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/Azure/go-autorest/autorest" @@ -107,6 +108,9 @@ type Config struct { // Use instance metadata service where possible UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"` + + // Use managed service identity for the virtual machine to access Azure ARM APIs + UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"` } // Cloud holds the config and clients @@ -122,8 +126,13 @@ type Cloud struct { SecurityGroupsClient network.SecurityGroupsClient VirtualMachinesClient compute.VirtualMachinesClient StorageAccountClient storage.AccountsClient + DisksClient disk.DisksClient operationPollRateLimiter flowcontrol.RateLimiter resourceRequestBackoff wait.Backoff + + *BlobDiskController + *ManagedDiskController + *controllerCommon } func init() { @@ -145,62 +154,62 @@ func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.Private return certificate, rsaPrivateKey, nil } -// newServicePrincipalToken creates a new service principal token based on the configuration -func newServicePrincipalToken(az *Cloud) (*adal.ServicePrincipalToken, error) { - oauthConfig, err := adal.NewOAuthConfig(az.Environment.ActiveDirectoryEndpoint, az.TenantID) +// GetServicePrincipalToken creates a new service principal token based on the configuration +func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID) if err != nil { return nil, fmt.Errorf("creating the OAuth config: %v", err) } - if len(az.AADClientSecret) > 0 { + if config.UseManagedIdentityExtension { + glog.V(2).Infoln("azure: using managed identity extension to retrieve access token") + return adal.NewServicePrincipalTokenFromMSI( + *oauthConfig, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientSecret) > 0 { + glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token") return adal.NewServicePrincipalToken( *oauthConfig, - az.AADClientID, - az.AADClientSecret, - az.Environment.ServiceManagementEndpoint) - } else if len(az.AADClientCertPath) > 0 && len(az.AADClientCertPassword) > 0 { - certData, err := ioutil.ReadFile(az.AADClientCertPath) + config.AADClientID, + config.AADClientSecret, + env.ServiceManagementEndpoint) + } + + if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 { + glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token") + certData, err := ioutil.ReadFile(config.AADClientCertPath) if err != nil { - return nil, fmt.Errorf("reading the client certificate from file %s: %v", az.AADClientCertPath, err) + return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err) } - certificate, privateKey, err := decodePkcs12(certData, az.AADClientCertPassword) + certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword) if err != nil { return nil, fmt.Errorf("decoding the client certificate: %v", err) } return adal.NewServicePrincipalTokenFromCertificate( *oauthConfig, - az.AADClientID, + 
config.AADClientID, certificate, privateKey, - az.Environment.ServiceManagementEndpoint) - } else { - return nil, fmt.Errorf("No credentials provided for AAD application %s", az.AADClientID) + env.ServiceManagementEndpoint) } + + return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID) } // NewCloud returns a Cloud with initialized clients func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { - var az Cloud - - configContents, err := ioutil.ReadAll(configReader) + config, env, err := ParseConfig(configReader) if err != nil { return nil, err } - err = yaml.Unmarshal(configContents, &az) - if err != nil { - return nil, err + az := Cloud{ + Config: *config, + Environment: *env, } - if az.Cloud == "" { - az.Environment = azure.PublicCloud - } else { - az.Environment, err = azure.EnvironmentFromName(az.Cloud) - if err != nil { - return nil, err - } - } - - servicePrincipalToken, err := newServicePrincipalToken(&az) + servicePrincipalToken, err := GetServicePrincipalToken(config, env) if err != nil { return nil, err } @@ -255,6 +264,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + configureUserAgent(&az.StorageAccountClient.Client) + + az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID) + az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken) + configureUserAgent(&az.DisksClient.Client) // Conditionally configure rate limits if az.CloudProviderRateLimit { @@ -304,9 +318,37 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) { az.CloudProviderBackoffJitter) } + if err := initDiskControllers(&az); err != nil { + return nil, err + } return &az, nil } +// ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file +func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) { + var config Config + + configContents, err := ioutil.ReadAll(configReader) + if err != nil { + return nil, nil, err + } + err = yaml.Unmarshal(configContents, &config) + if err != nil { + return nil, nil, err + } + + var env azure.Environment + if config.Cloud == "" { + env = azure.PublicCloud + } else { + env, err = azure.EnvironmentFromName(config.Cloud) + if err != nil { + return nil, nil, err + } + } + return &config, &env, nil +} + // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {} @@ -353,3 +395,42 @@ func configureUserAgent(client *autorest.Client) { k8sVersion := version.Get().GitVersion client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion) } + +func initDiskControllers(az *Cloud) error { + // Common controller contains the function + // needed by both blob disk and managed disk controllers + + common := &controllerCommon{ + aadResourceEndPoint: az.Environment.ServiceManagementEndpoint, + clientID: az.AADClientID, + clientSecret: az.AADClientSecret, + location: az.Location, + storageEndpointSuffix: az.Environment.StorageEndpointSuffix, + managementEndpoint: az.Environment.ResourceManagerEndpoint, + resourceGroup: az.ResourceGroup, + tenantID: az.TenantID, + tokenEndPoint: az.Environment.ActiveDirectoryEndpoint, + 
subscriptionID: az.SubscriptionID, + cloud: az, + } + + // BlobDiskController: contains the function needed to + // create/attach/detach/delete blob based (unmanaged disks) + blobController, err := newBlobDiskController(common) + if err != nil { + return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error()) + } + + // ManagedDiskController: contains the functions needed to + // create/attach/detach/delete managed disks + managedController, err := newManagedDiskController(common) + if err != nil { + return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error()) + } + + az.BlobDiskController = blobController + az.ManagedDiskController = managedController + az.controllerCommon = common + + return nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_blob.go b/pkg/cloudprovider/providers/azure/azure_blob.go deleted file mode 100644 index bf3e8de39aa..00000000000 --- a/pkg/cloudprovider/providers/azure/azure_blob.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure - -import ( - "fmt" - "regexp" - "strings" - - "bytes" - - azs "github.com/Azure/azure-sdk-for-go/storage" -) - -const ( - vhdContainerName = "vhds" - useHTTPS = true - blobServiceName = "blob" -) - -// create page blob -func (az *Cloud) createVhdBlob(accountName, accountKey, name string, sizeGB int64, tags map[string]string) (string, string, error) { - blobClient, err := az.getBlobClient(accountName, accountKey) - if err != nil { - return "", "", err - } - size := 1024 * 1024 * 1024 * sizeGB - vhdSize := size + vhdHeaderSize /* header size */ - // Blob name in URL must end with '.vhd' extension. 
- name = name + ".vhd" - cnt := blobClient.GetContainerReference(vhdContainerName) - b := cnt.GetBlobReference(name) - b.Properties.ContentLength = vhdSize - b.Metadata = tags - err = b.PutPageBlob(nil) - if err != nil { - // if container doesn't exist, create one and retry PutPageBlob - detail := err.Error() - if strings.Contains(detail, errContainerNotFound) { - err = cnt.Create(&azs.CreateContainerOptions{Access: azs.ContainerAccessTypePrivate}) - if err == nil { - b := cnt.GetBlobReference(name) - b.Properties.ContentLength = vhdSize - b.Metadata = tags - err = b.PutPageBlob(nil) - } - } - } - if err != nil { - return "", "", fmt.Errorf("failed to put page blob: %v", err) - } - - // add VHD signature to the blob - h, err := createVHDHeader(uint64(size)) - if err != nil { - az.deleteVhdBlob(accountName, accountKey, name) - return "", "", fmt.Errorf("failed to create vhd header, err: %v", err) - } - blobRange := azs.BlobRange{ - Start: uint64(size), - End: uint64(vhdSize - 1), - } - if err = b.WriteRange(blobRange, bytes.NewBuffer(h[:vhdHeaderSize]), nil); err != nil { - az.deleteVhdBlob(accountName, accountKey, name) - return "", "", fmt.Errorf("failed to update vhd header, err: %v", err) - } - - scheme := "http" - if useHTTPS { - scheme = "https" - } - host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, az.Environment.StorageEndpointSuffix) - uri := fmt.Sprintf("%s/%s/%s", host, vhdContainerName, name) - return name, uri, nil - -} - -// delete a vhd blob -func (az *Cloud) deleteVhdBlob(accountName, accountKey, blobName string) error { - blobClient, err := az.getBlobClient(accountName, accountKey) - if err == nil { - cnt := blobClient.GetContainerReference(vhdContainerName) - b := cnt.GetBlobReference(blobName) - return b.Delete(nil) - } - return err -} - -func (az *Cloud) getBlobClient(accountName, accountKey string) (*azs.BlobStorageClient, error) { - client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS) - if err != nil { - return nil, fmt.Errorf("error creating azure client: %v", err) - } - b := client.GetBlobService() - return &b, nil -} - -// get uri https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name) -func (az *Cloud) getBlobNameAndAccountFromURI(uri string) (string, string, error) { - scheme := "http" - if useHTTPS { - scheme = "https" - } - host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, az.Environment.StorageEndpointSuffix) - reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName) - re := regexp.MustCompile(reStr) - res := re.FindSubmatch([]byte(uri)) - if len(res) < 3 { - return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, uri) - } - return string(res[1]), string(res[2]), nil -} diff --git a/pkg/cloudprovider/providers/azure/azure_blobDiskController.go b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go new file mode 100644 index 00000000000..037c4941ef2 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_blobDiskController.go @@ -0,0 +1,808 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "net/url" + "os" + "regexp" + "sync" + + "strconv" + "strings" + "sync/atomic" + "time" + + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + azstorage "github.com/Azure/azure-sdk-for-go/storage" + "github.com/Azure/go-autorest/autorest/to" + "github.com/golang/glog" + "github.com/rubiojr/go-vhd/vhd" + kwait "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + vhdContainerName = "vhds" + useHTTPSForBlobBasedDisk = true + blobServiceName = "blob" +) + +type storageAccountState struct { + name string + saType storage.SkuName + key string + diskCount int32 + isValidating int32 + defaultContainerCreated bool +} + +// BlobDiskController: blob disk controller struct +type BlobDiskController struct { + common *controllerCommon + accounts map[string]*storageAccountState +} + +var defaultContainerName = "" +var storageAccountNamePrefix = "" +var storageAccountNameMatch = "" +var initFlag int64 + +var accountsLock = &sync.Mutex{} + +func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) { + c := BlobDiskController{common: common} + err := c.init() + + if err != nil { + return nil, err + } + + return &c, nil +} + +// CreateVolume creates a VHD blob in the given storage account, creating the storage account if it does not exist in the current resource group +func (c *BlobDiskController) CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) { + key, err := c.common.cloud.getStorageAccesskey(storageAccount) + if err != nil { + glog.V(2).Infof("azureDisk - no key found for storage account %s in resource group %s, beginning to create a new storage account", storageAccount, c.common.resourceGroup) + + cp := storage.AccountCreateParameters{ + Sku: &storage.Sku{Name: storageAccountType}, + Tags: &map[string]*string{"created-by": to.StringPtr("azure-dd")}, + Location: &location} + cancel := make(chan struct{}) + + _, errchan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccount, cp, cancel) + err = <-errchan + if err != nil { + return "", "", 0, fmt.Errorf("Create Storage Account %s, error: %s", storageAccount, err) + } + + key, err = c.common.cloud.getStorageAccesskey(storageAccount) + if err != nil { + return "", "", 0, fmt.Errorf("no key found for storage account %s even after creating a new storage account", storageAccount) + } + } + + client, err := azstorage.NewBasicClient(storageAccount, key) + if err != nil { + return "", "", 0, err + } + blobClient := client.GetBlobService() + + container := blobClient.GetContainerReference(vhdContainerName) + _, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return "", "", 0, err + } + + diskName, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccount, name,
vhdContainerName, int64(requestGB)) + if err != nil { + return "", "", 0, err + } + + glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI) + return diskName, diskURI, requestGB, err +} + +// DeleteVolume deletes a VHD blob +func (c *BlobDiskController) DeleteVolume(diskURI string) error { + glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI) + accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI) + if err != nil { + return fmt.Errorf("failed to parse vhd URI %v", err) + } + key, err := c.common.cloud.getStorageAccesskey(accountName) + if err != nil { + return fmt.Errorf("no key for storage account %s, err %v", accountName, err) + } + err = c.common.cloud.deleteVhdBlob(accountName, key, blob) + if err != nil { + glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err) + detail := err.Error() + if strings.Contains(detail, errLeaseIDMissing) { + // disk is still being used + // see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx + return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI)) + } + return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err) + } + glog.V(4).Infof("azureDisk - blob %s deleted", diskURI) + return nil + +} + +// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name) +func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) { + scheme := "http" + if useHTTPSForBlobBasedDisk { + scheme = "https" + } + host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix) + reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName) + re := regexp.MustCompile(reStr) + res := re.FindSubmatch([]byte(diskURI)) + if len(res) < 3 { + return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI) + } + return string(res[1]), string(res[2]), nil +} + +func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) { + container := blobClient.GetContainerReference(containerName) + _, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return "", "", err + } + + size := 1024 * 1024 * 1024 * sizeGB + vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */ + // Blob name in URL must end with '.vhd' extension. 
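+ // (the vhdName returned to the caller below therefore already carries the .vhd extension)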
+ vhdName = vhdName + ".vhd" + + tags := make(map[string]string) + tags["createdby"] = "k8sAzureDataDisk" + glog.V(4).Infof("azureDisk - creating page blob %name in container %s account %s", vhdName, containerName, accountName) + + blob := container.GetBlobReference(vhdName) + blob.Properties.ContentLength = vhdSize + blob.Metadata = tags + err = blob.PutPageBlob(nil) + if err != nil { + return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err) + } + + // add VHD signature to the blob + h, err := createVHDHeader(uint64(size)) + if err != nil { + blob.DeleteIfExists(nil) + return "", "", fmt.Errorf("failed to create vhd header, err: %v", err) + } + + blobRange := azstorage.BlobRange{ + Start: uint64(size), + End: uint64(vhdSize - 1), + } + if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil { + glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n", + vhdName, containerName, accountName, err.Error()) + return "", "", err + } + + scheme := "http" + if useHTTPSForBlobBasedDisk { + scheme = "https" + } + + host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix) + uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName) + return vhdName, uri, nil +} + +// delete a vhd blob +func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error { + client, err := azstorage.NewBasicClient(accountName, accountKey) + if err != nil { + return err + } + blobSvc := client.GetBlobService() + + container := blobSvc.GetContainerReference(vhdContainerName) + blob := container.GetBlobReference(blobName) + return blob.Delete(nil) +} + +//CreateBlobDisk : create a blob disk in a node +func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) { + glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s StandAlone:%v", dataDiskName, storageAccountType, forceStandAlone) + + var storageAccountName = "" + var err error + + if forceStandAlone { + // we have to wait until the storage account is is created + storageAccountName = "p" + MakeCRC32(c.common.subscriptionID+c.common.resourceGroup+dataDiskName) + err = c.createStorageAccount(storageAccountName, storageAccountType, c.common.location, false) + if err != nil { + return "", err + } + } else { + storageAccountName, err = c.findSANameForDisk(storageAccountType) + if err != nil { + return "", err + } + } + + blobClient, err := c.getBlobSvcClient(storageAccountName) + if err != nil { + return "", err + } + + _, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB)) + if err != nil { + return "", err + } + + if !forceStandAlone { + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1) + } + + return diskURI, nil +} + +//DeleteBlobDisk : delete a blob disk from a node +func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) error { + storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI) + if err != nil { + return err + } + + _, ok := c.accounts[storageAccountName] + if !ok { + // the storage account is specified by user + glog.V(4).Infof("azureDisk - deleting volume %s", diskURI) + return c.DeleteVolume(diskURI) + } + // if forced (as in one disk = one storage account) + // delete the account completely + if wasForced { + 
return c.deleteStorageAccount(storageAccountName) + } + + blobSvc, err := c.getBlobSvcClient(storageAccountName) + if err != nil { + return err + } + + glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName) + + container := blobSvc.GetContainerReference(defaultContainerName) + blob := container.GetBlobReference(vhdName) + _, err = blob.DeleteIfExists(nil) + + if c.accounts[storageAccountName].diskCount == -1 { + if diskCount, err := c.getDiskCount(storageAccountName); err == nil { + c.accounts[storageAccountName].diskCount = int32(diskCount) + } else { + glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName) + return nil // we have failed to acquire a new count; not an error condition + } + } + atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1) + return err +} + +// init tries best effort to ensure that two accounts (standard/premium) are created +// to be used by shared blob disks; this increases the speed of PVC provisioning (in most cases) +func (c *BlobDiskController) init() error { + if !c.shouldInit() { + return nil + } + + c.setUniqueStrings() + + // get accounts + accounts, err := c.getAllStorageAccounts() + if err != nil { + return err + } + c.accounts = accounts + + if len(c.accounts) == 0 { + counter := 1 + for counter <= storageAccountsCountInit { + + accountType := storage.PremiumLRS + if n := math.Mod(float64(counter), 2); n == 0 { + accountType = storage.StandardLRS + } + + // We don't really care if these calls failed + // at this stage, we are trying to ensure 2 accounts (Standard/Premium) + // are there ready for PVC creation + + // if we failed here, the accounts will be created in the process + // of creating PVC + + // nor do we care if they were partially created, as the entire + // account creation process is idempotent + go func(thisNext int) { + newAccountName := getAccountNameForNum(thisNext) + + glog.Infof("azureDisk - BlobDiskController init process will create new storageAccount:%s type:%s", newAccountName, accountType) + err := c.createStorageAccount(newAccountName, accountType, c.common.location, true) + // TODO return created and error from + if err != nil { + glog.Infof("azureDisk - BlobDiskController init: create account %s with error:%s", newAccountName, err.Error()) + + } else { + glog.Infof("azureDisk - BlobDiskController init: created account %s", newAccountName) + } + }(counter) + counter = counter + 1 + } + } + + return nil +} + +// setUniqueStrings sets unique strings to be used as account names and/or blob container names +func (c *BlobDiskController) setUniqueStrings() { + uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID + hash := MakeCRC32(uniqueString) + // used to generate a unique container name used by this cluster's PVCs + defaultContainerName = hash + + storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash) + // Used to filter relevant accounts (accounts used by shared PVC) + storageAccountNameMatch = storageAccountNamePrefix + // Used as a template to create new names for relevant accounts + storageAccountNamePrefix = storageAccountNamePrefix + "%s" +} +func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) { + if account, exists := c.accounts[SAName]; exists && account.key != "" { + return c.accounts[SAName].key, nil + } + listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup,
SAName) + if err != nil { + return "", err + } + if listKeysResult.Keys == nil { + return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName) + } + for _, v := range *listKeysResult.Keys { + if v.Value != nil && *v.Value == "key1" { + if _, ok := c.accounts[SAName]; !ok { + glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName) + return *v.Value, nil + } + } + + c.accounts[SAName].key = *v.Value + return c.accounts[SAName].key, nil + } + + return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName) +} + +func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) { + key := "" + var client azstorage.Client + var blobSvc azstorage.BlobStorageClient + var err error + if key, err = c.getStorageAccountKey(SAName); err != nil { + return blobSvc, err + } + + if client, err = azstorage.NewBasicClient(SAName, key); err != nil { + return blobSvc, err + } + + blobSvc = client.GetBlobService() + return blobSvc, nil +} + +func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error { + var err error + var blobSvc azstorage.BlobStorageClient + + // short-circuit the check via the local cache + // we tolerate that the account may not be in the cache yet + if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { + return nil + } + + // not cached, check existence and readiness + bExist, provisionState, _ := c.getStorageAccountState(storageAccountName) + + // account does not exist + if !bExist { + return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName) + } + + // account exists but is not ready yet + if provisionState != storage.Succeeded { + // we don't want many attempts to validate the account readiness + // here, hence we are locking + counter := 1 + for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); !swapped; swapped = atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1) { + time.Sleep(3 * time.Second) + counter = counter + 1 + // check if we passed the max sleep + if counter >= 20 { + return fmt.Errorf("azureDisk - timeout waiting to acquire lock to validate account:%s readiness", storageAccountName) + } + } + + // swapped + defer func() { + c.accounts[storageAccountName].isValidating = 0 + }() + + // short-circuit the check again.
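+ // (another goroutine may have validated the account and created the container while we were waiting on the lock above)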
+ if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated { + return nil + } + + err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) { + _, provisionState, err := c.getStorageAccountState(storageAccountName) + + if err != nil { + glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error()) + return false, err + } + + if provisionState == storage.Succeeded { + return true, nil + } + + glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet", storageAccountName) + // leave it for the next loop/sync loop + return false, fmt.Errorf("azureDisk - Account %s has not been flagged Succeeded by ARM", storageAccountName) + }) + // we have failed to ensure that the account is ready for us to create + // the default vhd container + if err != nil { + return err + } + } + + if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil { + return err + } + + container := blobSvc.GetContainerReference(defaultContainerName) + bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate}) + if err != nil { + return err + } + if bCreated { + glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName) + } + + // flag so we no longer have to check on ARM + c.accounts[storageAccountName].defaultContainerCreated = true + return nil +} + +// getDiskCount gets the disk count per storage account +func (c *BlobDiskController) getDiskCount(SAName string) (int, error) { + // if we have it in cache + if c.accounts[SAName].diskCount != -1 { + return int(c.accounts[SAName].diskCount), nil + } + + var err error + var blobSvc azstorage.BlobStorageClient + + if err = c.ensureDefaultContainer(SAName); err != nil { + return 0, err + } + + if blobSvc, err = c.getBlobSvcClient(SAName); err != nil { + return 0, err + } + params := azstorage.ListBlobsParameters{} + + container := blobSvc.GetContainerReference(defaultContainerName) + response, err := container.ListBlobs(params) + if err != nil { + return 0, err + } + glog.V(4).Infof("azureDisk - refreshed data count for account %s and found %v", SAName, len(response.Blobs)) + c.accounts[SAName].diskCount = int32(len(response.Blobs)) + + return int(c.accounts[SAName].diskCount), nil +} + +// shouldInit ensures that we only init the plugin once +// and we only do that in the controller + +func (c *BlobDiskController) shouldInit() bool { + if os.Args[0] == "kube-controller-manager" || (os.Args[0] == "/hyperkube" && os.Args[1] == "controller-manager") { + swapped := atomic.CompareAndSwapInt64(&initFlag, 0, 1) + if swapped { + return true + } + } + return false +} + +func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) { + accountListResult, err := c.common.cloud.StorageAccountClient.List() + if err != nil { + return nil, err + } + if accountListResult.Value == nil { + return nil, fmt.Errorf("azureDisk - empty accountListResult") + } + + accounts := make(map[string]*storageAccountState) + for _, v := range *accountListResult.Value { + if v.Name == nil || v.Sku == nil { + glog.Infof("azureDisk - accountListResult Name or Sku is nil") + continue + } + if strings.Index(*v.Name, storageAccountNameMatch) != 0 { + continue + } + glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name) + + sastate := &storageAccountState{ + name: *v.Name, + saType: (*v.Sku).Name, + diskCount: -1, + } + + accounts[*v.Name] = sastate + } +
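+ // at this point the accounts map holds one storageAccountState per shared-PVC account; diskCount is seeded with -1 (meaning "not yet counted") and is filled in lazily by getDiskCount on first use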
+	return accounts, nil
+}
+
+func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
+	bExist, _, _ := c.getStorageAccountState(storageAccountName)
+	if !bExist {
+		// account does not exist
+		if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
+			return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
+		}
+
+		glog.V(2).Infof("azureDisk - Creating storage account %s type %s", storageAccountName, string(storageAccountType))
+
+		cp := storage.AccountCreateParameters{
+			Sku:      &storage.Sku{Name: storageAccountType},
+			Tags:     &map[string]*string{"created-by": to.StringPtr("azure-dd")},
+			Location: &location}
+		cancel := make(chan struct{})
+
+		_, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel)
+		err := <-errChan
+		if err != nil {
+			return fmt.Errorf("Create Storage Account: %s, error: %v", storageAccountName, err)
+		}
+	}
+
+	newAccountState := &storageAccountState{
+		diskCount: -1,
+		saType:    storageAccountType,
+		name:      storageAccountName,
+	}
+
+	c.addAccountState(storageAccountName, newAccountState)
+
+	if !bExist {
+		// storage accounts take time to be provisioned,
+		// so if this account was just created allow it some time
+		// before polling
+		glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status", storageAccountName)
+		time.Sleep(25 * time.Second) // as observed, 25s is the average time for an account to be provisioned
+	}
+
+	// finally, make sure that the default container is created
+	// before handing the account back over
+	return c.ensureDefaultContainer(storageAccountName)
+}
+
+// findSANameForDisk finds a suitable storage account for a new disk
+func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
+	maxDiskCount := maxDisksPerStorageAccounts
+	SAName := ""
+	totalDiskCounts := 0
+	countAccounts := 0 // accounts of this type.
+	for _, v := range c.accounts {
+		// filter out any stand-alone disks/accounts
+		if strings.Index(v.name, storageAccountNameMatch) != 0 {
+			continue
+		}
+
+		// note: we compute the average stratified by account type.
+		// this enables the user to grow per SA type and avoids low
+		// average utilization on one account type skewing all the data.
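+		// illustrative arithmetic: with maxDisksPerStorageAccounts = 60 and two
+		// accounts of this type holding 30 disks in total, adding one disk gives
+		// avgUtilization = 31/(2*60) ≈ 0.26, below the 0.5 growth threshold,
+		// so no extra account would be created.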
+
+		if v.saType == storageAccountType {
+			// compute average
+			dCount, err := c.getDiskCount(v.name)
+			if err != nil {
+				return "", err
+			}
+			totalDiskCounts = totalDiskCounts + dCount
+			countAccounts = countAccounts + 1
+			// empty account
+			if dCount == 0 {
+				glog.V(2).Infof("azureDisk - account %s identified for a new disk because it has 0 allocated disks", v.name)
+				return v.name, nil // short circuit; the avg is good and there is no need to adjust
+			}
+			// if this account has fewer allocated disks
+			if dCount < maxDiskCount {
+				maxDiskCount = dCount
+				SAName = v.name
+			}
+		}
+	}
+
+	// if we failed to find a storage account
+	if SAName == "" {
+		glog.V(2).Infof("azureDisk - failed to identify a suitable account for the new disk; will attempt to create a new account")
+		SAName = getAccountNameForNum(c.getNextAccountNum())
+		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
+		if err != nil {
+			return "", err
+		}
+		return SAName, nil
+	}
+
+	disksAfter := totalDiskCounts + 1 // with the new one!
+
+	avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
+	aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)
+
+	// average utilization is above the growth threshold and we should create more accounts if we can
+	if aboveAvg && countAccounts < maxStorageAccounts {
+		glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
+		SAName = getAccountNameForNum(c.getNextAccountNum())
+		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
+		if err != nil {
+			return "", err
+		}
+		return SAName, nil
+	}
+
+	// average utilization is above the threshold but we are at capacity (max storage accounts allowed)
+	if aboveAvg && countAccounts == maxStorageAccounts {
+		glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
+			avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
+	}
+
+	// we found a storage account && [ the avg is ok || we reached the max SA count ]
+	return SAName, nil
+}
+
+func (c *BlobDiskController) getNextAccountNum() int {
+	max := 0
+
+	for k := range c.accounts {
+		// filter out accounts that are for stand-alone disks
+		if strings.Index(k, storageAccountNameMatch) != 0 {
+			continue
+		}
+		num := getAccountNumFromName(k)
+		if num > max {
+			max = num
+		}
+	}
+
+	return max + 1
+}
+
+func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error {
+	resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName)
+	if err != nil {
+		return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err)
+	}
+
+	c.removeAccountState(storageAccountName)
+
+	glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName)
+	return nil
+}
+
+// getStorageAccountState returns whether the storage account exists, its provisioning state, and an error, if any
+func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
+	account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName)
+	if err != nil {
+		return false, "", err
+	}
+	return true, account.AccountProperties.ProvisioningState, nil
+}
+
+func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
+	accountsLock.Lock()
+	defer accountsLock.Unlock()
+
+	if _, ok := c.accounts[key]; !ok {
+		c.accounts[key] = state
+	}
+}
+
+func (c *BlobDiskController) removeAccountState(key string) {
+	accountsLock.Lock()
+	defer accountsLock.Unlock()
+	delete(c.accounts, key)
+}
+
+// pads the account num with zeros as needed
+func getAccountNameForNum(num int) string {
+	sNum := strconv.Itoa(num)
+	missingZeros := 3 - len(sNum)
+	strZero := ""
+	for missingZeros > 0 {
+		strZero = strZero + "0"
+		missingZeros = missingZeros - 1
+	}
+
+	sNum = strZero + sNum
+	return fmt.Sprintf(storageAccountNamePrefix, sNum)
+}
+
+func getAccountNumFromName(accountName string) int {
+	nameLen := len(accountName)
+	num, _ := strconv.Atoi(accountName[nameLen-3:])
+
+	return num
+}
+
+func createVHDHeader(size uint64) ([]byte, error) {
+	h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
+	b := new(bytes.Buffer)
+	err := binary.Write(b, binary.BigEndian, h)
+	if err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+func diskNameandSANameFromURI(diskURI string) (string, string, error) {
+	uri, err := url.Parse(diskURI)
+	if err != nil {
+		return "", "", err
+	}
+
+	hostName := uri.Host
+	storageAccountName := strings.Split(hostName, ".")[0]
+
+	segments := strings.Split(uri.Path, "/")
+	diskNameVhd := segments[len(segments)-1]
+
+	return storageAccountName, diskNameVhd, nil
+}
diff --git a/pkg/cloudprovider/providers/azure/azure_controllerCommon.go b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
new file mode 100644
index 00000000000..881a7dbb2c4
--- /dev/null
+++ b/pkg/cloudprovider/providers/azure/azure_controllerCommon.go
@@ -0,0 +1,275 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package azure
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"k8s.io/apimachinery/pkg/types"
+	kwait "k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/kubernetes/pkg/cloudprovider"
+
+	"github.com/Azure/azure-sdk-for-go/arm/compute"
+	"github.com/golang/glog"
+)
+
+const (
+	defaultDataDiskCount int = 16 // which allows working with most medium-size VMs (if not found in the map)
+	storageAccountNameTemplate   = "pvc%s"
+
+	// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
+	maxStorageAccounts                     = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand-alone disks
+	maxDisksPerStorageAccounts             = 60
+	storageAccountUtilizationBeforeGrowing = 0.5
+	storageAccountsCountInit               = 2 // When the plug-in is initialized, 2 storage accounts will be created to allow fast pvc create/attach/mount
+
+	maxLUN               = 64 // max number of LUNs per VM
+	errLeaseFailed       = "AcquireDiskLeaseFailed"
+	errLeaseIDMissing    = "LeaseIdMissing"
+	errContainerNotFound = "ContainerNotFound"
+)
+
+var defaultBackOff = kwait.Backoff{
+	Steps:    20,
+	Duration: 2 * time.Second,
+	Factor:   1.5,
+	Jitter:   0.0,
+}
+
+type controllerCommon struct {
+	tenantID              string
+	subscriptionID        string
+	location              string
+	storageEndpointSuffix string
+	resourceGroup         string
+	clientID              string
+	clientSecret          string
+	managementEndpoint    string
+	tokenEndPoint         string
+	aadResourceEndPoint   string
+	aadToken              string
+	expiresOn             time.Time
+	cloud                 *Cloud
+}
+
+// AttachDisk attaches a vhd to a vm;
+// the vhd must exist and can be identified by diskName, diskURI, and lun.
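+// For a managed disk, diskURI is the ARM resource ID of the disk; for a
+// blob-based disk, it is the URI of the backing VHD page blob.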
+func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
+	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
+	if err != nil {
+		return err
+	} else if !exists {
+		return cloudprovider.InstanceNotFound
+	}
+	disks := *vm.StorageProfile.DataDisks
+	if isManagedDisk {
+		disks = append(disks,
+			compute.DataDisk{
+				Name:         &diskName,
+				Lun:          &lun,
+				Caching:      cachingMode,
+				CreateOption: "attach",
+				ManagedDisk: &compute.ManagedDiskParameters{
+					ID: &diskURI,
+				},
+			})
+	} else {
+		disks = append(disks,
+			compute.DataDisk{
+				Name: &diskName,
+				Vhd: &compute.VirtualHardDisk{
+					URI: &diskURI,
+				},
+				Lun:          &lun,
+				Caching:      cachingMode,
+				CreateOption: "attach",
+			})
+	}
+
+	newVM := compute.VirtualMachine{
+		Location: vm.Location,
+		VirtualMachineProperties: &compute.VirtualMachineProperties{
+			StorageProfile: &compute.StorageProfile{
+				DataDisks: &disks,
+			},
+		},
+	}
+	vmName := mapNodeNameToVMName(nodeName)
+	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName)
+	c.cloud.operationPollRateLimiter.Accept()
+	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
+	resp := <-respChan
+	err = <-errChan
+	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
+		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
+		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
+		if retryErr != nil {
+			err = retryErr
+			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
+		}
+	}
+	if err != nil {
+		glog.Errorf("azureDisk - azure attach failed, err: %v", err)
+		detail := err.Error()
+		if strings.Contains(detail, errLeaseFailed) {
+			// if the lease cannot be acquired, immediately detach the disk and return the original error
+			glog.Infof("azureDisk - failed to acquire disk lease, try detach")
+			c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
+		}
+	} else {
+		glog.V(4).Infof("azureDisk - azure attach succeeded")
+	}
+	return err
+}
+
+// DetachDiskByName detaches a vhd from the host;
+// the vhd can be identified by diskName or diskURI
+func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
+	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
+	if err != nil || !exists {
+		// if host doesn't exist, no need to detach
+		glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
+		return nil
+	}
+
+	disks := *vm.StorageProfile.DataDisks
+	bFoundDisk := false
+	for i, disk := range disks {
+		if (disk.Lun != nil && disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
+			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
+			(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
+			// found the disk
+			glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
+			disks = append(disks[:i], disks[i+1:]...)
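+			// the append above drops element i by re-slicing around it; this is
+			// safe because we break out of the range loop immediately afterwards.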
+			bFoundDisk = true
+			break
+		}
+	}
+
+	if !bFoundDisk {
+		return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
+	}
+
+	newVM := compute.VirtualMachine{
+		Location: vm.Location,
+		VirtualMachineProperties: &compute.VirtualMachineProperties{
+			StorageProfile: &compute.StorageProfile{
+				DataDisks: &disks,
+			},
+		},
+	}
+	vmName := mapNodeNameToVMName(nodeName)
+	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName)
+	c.cloud.operationPollRateLimiter.Accept()
+	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
+	resp := <-respChan
+	err = <-errChan
+	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
+		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
+		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
+		if retryErr != nil {
+			err = retryErr
+			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
+		}
+	}
+	if err != nil {
+		glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
+	} else {
+		glog.V(4).Infof("azureDisk - azure disk detach succeeded")
+	}
+	return err
+}
+
+// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
+func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
+	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
+	if err != nil {
+		return -1, err
+	} else if !exists {
+		return -1, cloudprovider.InstanceNotFound
+	}
+	disks := *vm.StorageProfile.DataDisks
+	for _, disk := range disks {
+		// disk.Lun must be non-nil in every case since it is dereferenced below
+		if disk.Lun != nil && ((disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
+			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
+			(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI)) {
+			// found the disk
+			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
+			return *disk.Lun, nil
+		}
+	}
+	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
+}
+
+// GetNextDiskLun searches all vhd attachments on the host and finds an unused lun;
+// returns -1 if all luns are used
+func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
+	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
+	if err != nil {
+		return -1, err
+	} else if !exists {
+		return -1, cloudprovider.InstanceNotFound
+	}
+	used := make([]bool, maxLUN)
+	disks := *vm.StorageProfile.DataDisks
+	for _, disk := range disks {
+		if disk.Lun != nil {
+			used[*disk.Lun] = true
+		}
+	}
+	for k, v := range used {
+		if !v {
+			return int32(k), nil
+		}
+	}
+	return -1, fmt.Errorf("All Luns are used")
+}
+
+// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
+func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
+	attached := make(map[string]bool)
+	for _, diskName := range diskNames {
+		attached[diskName] = false
+	}
+	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
+	if !exists {
+		// if host doesn't exist, no need to detach
+		glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
+			nodeName, diskNames)
+		return attached, nil
+	} else if err != nil {
+		return attached, err
+	}
+
+	disks := *vm.StorageProfile.DataDisks
+	for _, disk := range disks {
+		for _, diskName := range 
diskNames { + if disk.Name != nil && diskName != "" && *disk.Name == diskName { + attached[diskName] = true + } + } + } + + return attached, nil +} diff --git a/pkg/cloudprovider/providers/azure/azure_file.go b/pkg/cloudprovider/providers/azure/azure_file.go index 97beaf670db..48291128324 100644 --- a/pkg/cloudprovider/providers/azure/azure_file.go +++ b/pkg/cloudprovider/providers/azure/azure_file.go @@ -23,6 +23,10 @@ import ( "github.com/golang/glog" ) +const ( + useHTTPS = true +) + // create file share func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB int) error { fileClient, err := az.getFileSvcClient(accountName, accountKey) @@ -55,7 +59,7 @@ func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error { share := fileClient.GetShareReference(name) return share.Delete(nil) } - return err + return nil } func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) { diff --git a/pkg/cloudprovider/providers/azure/azure_managedDiskController.go b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go new file mode 100644 index 00000000000..5acdf583583 --- /dev/null +++ b/pkg/cloudprovider/providers/azure/azure_managedDiskController.go @@ -0,0 +1,129 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package azure
+
+import (
+	"path"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/arm/disk"
+	storage "github.com/Azure/azure-sdk-for-go/arm/storage"
+	"github.com/golang/glog"
+	kwait "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// ManagedDiskController : managed disk controller struct
+type ManagedDiskController struct {
+	common *controllerCommon
+}
+
+func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
+	return &ManagedDiskController{common: common}, nil
+}
+
+// CreateManagedDisk : create managed disk
+func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
+	glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
+
+	newTags := make(map[string]*string)
+	azureDDTag := "kubernetes-azure-dd"
+	newTags["created-by"] = &azureDDTag
+
+	// insert original tags into newTags
+	if tags != nil {
+		for k, v := range tags {
+			// Azure won't allow / (forward slash) in tags
+			newKey := strings.Replace(k, "/", "-", -1)
+			newValue := strings.Replace(v, "/", "-", -1)
+			newTags[newKey] = &newValue
+		}
+	}
+
+	diskSizeGB := int32(sizeGB)
+	model := disk.Model{
+		Location: &c.common.location,
+		Tags:     &newTags,
+		Properties: &disk.Properties{
+			AccountType:  disk.StorageAccountTypes(storageAccountType),
+			DiskSizeGB:   &diskSizeGB,
+			CreationData: &disk.CreationData{CreateOption: disk.Empty},
+		}}
+	cancel := make(chan struct{})
+	respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel)
+	<-respChan
+	err := <-errChan
+	if err != nil {
+		return "", err
+	}
+
+	diskID := ""
+
+	err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
+		provisionState, id, err := c.getDisk(diskName)
+		diskID = id
+		// We are waiting for provisioningState==Succeeded.
+		// We don't want to hand off managed disks to k8s while they are
+		// still being provisioned; this is to avoid some race conditions.
+		if err != nil {
+			return false, err
+		}
+		if strings.ToLower(provisionState) == "succeeded" {
+			return true, nil
+		}
+		return false, nil
+	})
+
+	if err != nil {
+		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
+	} else {
+		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
+	}
+
+	return diskID, nil
+}
+
+// DeleteManagedDisk : delete managed disk
+func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
+	diskName := path.Base(diskURI)
+	cancel := make(chan struct{})
+	respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel)
+	<-respChan
+	err := <-errChan
+	if err != nil {
+		return err
+	}
+	// We don't need to poll here; k8s will immediately stop referencing the disk
+	// and the disk will eventually be deleted - cleanly - by ARM.
+
+	glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)
+
+	return nil
+}
+
+// getDisk returns the disk provisionState, diskID, and error, if any
+func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
+	result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName)
+	if err != nil {
+		return "", "", err
+	}
+
+	if result.Properties != nil && (*result.Properties).ProvisioningState != nil {
+		return *(*result.Properties).ProvisioningState, 
*result.ID, nil + } + + return "", "", err +} diff --git a/pkg/cloudprovider/providers/azure/azure_storage.go b/pkg/cloudprovider/providers/azure/azure_storage.go index 255d0e1ef11..8572b9c779d 100644 --- a/pkg/cloudprovider/providers/azure/azure_storage.go +++ b/pkg/cloudprovider/providers/azure/azure_storage.go @@ -18,264 +18,10 @@ package azure import ( "fmt" - "strings" - "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/golang/glog" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/volume" ) -const ( - maxLUN = 64 // max number of LUNs per VM - errLeaseFailed = "AcquireDiskLeaseFailed" - errLeaseIDMissing = "LeaseIdMissing" - errContainerNotFound = "ContainerNotFound" -) - -// AttachDisk attaches a vhd to vm -// the vhd must exist, can be identified by diskName, diskURI, and lun. -func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return err - } else if !exists { - return cloudprovider.InstanceNotFound - } - disks := *vm.StorageProfile.DataDisks - disks = append(disks, - compute.DataDisk{ - Name: &diskName, - Vhd: &compute.VirtualHardDisk{ - URI: &diskURI, - }, - Lun: &lun, - Caching: cachingMode, - CreateOption: "attach", - }) - - newVM := compute.VirtualMachine{ - Location: vm.Location, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, - } - vmName := mapNodeNameToVMName(nodeName) - glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName) - az.operationPollRateLimiter.Accept() - respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - resp := <-respChan - err = <-errChan - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName) - retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName) - } - } - if err != nil { - glog.Errorf("azure attach failed, err: %v", err) - detail := err.Error() - if strings.Contains(detail, errLeaseFailed) { - // if lease cannot be acquired, immediately detach the disk and return the original error - glog.Infof("failed to acquire disk lease, try detach") - az.DetachDiskByName(diskName, diskURI, nodeName) - } - } else { - glog.V(4).Infof("azure attach succeeded") - } - return err -} - -// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName -func (az *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) { - attached := make(map[string]bool) - for _, diskName := range diskNames { - attached[diskName] = false - } - vm, exists, err := az.getVirtualMachine(nodeName) - if !exists { - // if host doesn't exist, no need to detach - glog.Warningf("Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.", - nodeName, diskNames) - return attached, nil - } else if err != nil { - return attached, err - } - - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - for _, diskName := range diskNames { - if disk.Name != nil && diskName != "" && *disk.Name == diskName { - attached[diskName] = true - } - } - } - - return attached, nil -} - -// DetachDiskByName detaches a vhd from host -// the vhd 
can be identified by diskName or diskURI -func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil || !exists { - // if host doesn't exist, no need to detach - glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName) - return nil - } - - disks := *vm.StorageProfile.DataDisks - for i, disk := range disks { - if (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) { - // found the disk - glog.V(4).Infof("detach disk: name %q uri %q", diskName, diskURI) - disks = append(disks[:i], disks[i+1:]...) - break - } - } - newVM := compute.VirtualMachine{ - Location: vm.Location, - VirtualMachineProperties: &compute.VirtualMachineProperties{ - StorageProfile: &compute.StorageProfile{ - DataDisks: &disks, - }, - }, - } - vmName := mapNodeNameToVMName(nodeName) - glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName) - az.operationPollRateLimiter.Accept() - respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil) - resp := <-respChan - err = <-errChan - if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) { - glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName) - retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM) - if retryErr != nil { - err = retryErr - glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName) - } - } - if err != nil { - glog.Errorf("azure disk detach failed, err: %v", err) - } else { - glog.V(4).Infof("azure disk detach succeeded") - } - return err -} - -// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI -func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound - } - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) { - // found the disk - glog.V(4).Infof("find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI) - return *disk.Lun, nil - } - } - return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName) -} - -// GetNextDiskLun searches all vhd attachment on the host and find unused lun -// return -1 if all luns are used -func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) { - vm, exists, err := az.getVirtualMachine(nodeName) - if err != nil { - return -1, err - } else if !exists { - return -1, cloudprovider.InstanceNotFound - } - used := make([]bool, maxLUN) - disks := *vm.StorageProfile.DataDisks - for _, disk := range disks { - if disk.Lun != nil { - used[*disk.Lun] = true - } - } - for k, v := range used { - if !v { - return int32(k), nil - } - } - return -1, fmt.Errorf("All Luns are used") -} - -// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account. -// If no storage account is given, search all the storage accounts associated with the resource group and pick one that -// fits storage type and location. 
-func (az *Cloud) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) { - var err error - accounts := []accountWithLocation{} - if len(storageAccount) > 0 { - accounts = append(accounts, accountWithLocation{Name: storageAccount}) - } else { - // find a storage account - accounts, err = az.getStorageAccounts() - if err != nil { - // TODO: create a storage account and container - return "", "", 0, err - } - } - for _, account := range accounts { - glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location) - if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 { - // find the access key with this account - key, err := az.getStorageAccesskey(account.Name) - if err != nil { - glog.V(2).Infof("no key found for storage account %s", account.Name) - continue - } - - // create a page blob in this account's vhd container - name, uri, err := az.createVhdBlob(account.Name, key, name, int64(requestGB), nil) - if err != nil { - glog.V(2).Infof("failed to create vhd in account %s: %v", account.Name, err) - continue - } - glog.V(4).Infof("created vhd blob uri: %s", uri) - return name, uri, requestGB, err - } - } - return "", "", 0, fmt.Errorf("failed to find a matching storage account") -} - -// DeleteVolume deletes a VHD blob -func (az *Cloud) DeleteVolume(name, uri string) error { - accountName, blob, err := az.getBlobNameAndAccountFromURI(uri) - if err != nil { - return fmt.Errorf("failed to parse vhd URI %v", err) - } - key, err := az.getStorageAccesskey(accountName) - if err != nil { - return fmt.Errorf("no key for storage account %s, err %v", accountName, err) - } - err = az.deleteVhdBlob(accountName, key, blob) - if err != nil { - glog.Warningf("failed to delete blob %s err: %v", uri, err) - detail := err.Error() - if strings.Contains(detail, errLeaseIDMissing) { - // disk is still being used - // see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx - return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", name)) - } - return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", uri, accountName, blob, err) - } - glog.V(4).Infof("blob %s deleted", uri) - return nil - -} - // CreateFileShare creates a file share, using a matching storage account func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) { var err error diff --git a/pkg/cloudprovider/providers/azure/azure_util.go b/pkg/cloudprovider/providers/azure/azure_util.go index 0b9c44c0473..5f59da85918 100644 --- a/pkg/cloudprovider/providers/azure/azure_util.go +++ b/pkg/cloudprovider/providers/azure/azure_util.go @@ -19,7 +19,9 @@ package azure import ( "errors" "fmt" + "hash/crc32" "regexp" + "strconv" "strings" "k8s.io/api/core/v1" @@ -293,3 +295,58 @@ func splitProviderID(providerID string) (types.NodeName, error) { } return types.NodeName(matches[1]), nil } + +var polyTable = crc32.MakeTable(crc32.Koopman) + +//MakeCRC32 : convert string to CRC32 format +func MakeCRC32(str string) string { + crc := crc32.New(polyTable) + crc.Write([]byte(str)) + hash := crc.Sum32() + return strconv.FormatUint(uint64(hash), 10) +} + +//ExtractVMData : extract dataDisks, storageProfile from a map struct +func ExtractVMData(vmData map[string]interface{}) (dataDisks 
[]interface{},
+	storageProfile map[string]interface{},
+	hardwareProfile map[string]interface{}, err error) {
+	props, ok := vmData["properties"].(map[string]interface{})
+	if !ok {
+		return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
+	}
+
+	storageProfile, ok = props["storageProfile"].(map[string]interface{})
+	if !ok {
+		return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
+	}
+
+	hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
+	if !ok {
+		return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
+	}
+
+	dataDisks, ok = storageProfile["dataDisks"].([]interface{})
+	if !ok {
+		return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to array error")
+	}
+	return dataDisks, storageProfile, hardwareProfile, nil
+}
+
+//ExtractDiskData : extract provisioningState, diskState from a map struct
+func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
+	fragment, ok := diskData.(map[string]interface{})
+	if !ok {
+		return "", "", fmt.Errorf("convert diskData to map error")
+	}
+
+	properties, ok := fragment["properties"].(map[string]interface{})
+	if !ok {
+		return "", "", fmt.Errorf("convert diskData(properties) to map error")
+	}
+
+	provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, the provisioningState property will be there
+	if !ok {
+		return "", "", fmt.Errorf("convert diskData(provisioningState) to string error")
+	}
+	if ref, ok := properties["diskState"]; ok {
+		diskState, _ = ref.(string)
+	}
+	return provisioningState, diskState, nil
+}
diff --git a/pkg/controller/OWNERS b/pkg/controller/OWNERS
index 56f54dddc8e..6b09a34b804 100644
--- a/pkg/controller/OWNERS
+++ b/pkg/controller/OWNERS
@@ -1,5 +1,4 @@
 approvers:
-- bprashanth
 - deads2k
 - derekwaynecarr
 - mikedanese
diff --git a/pkg/controller/endpoint/OWNERS b/pkg/controller/endpoint/OWNERS
index dffc5525e78..3bb8ef23f46 100755
--- a/pkg/controller/endpoint/OWNERS
+++ b/pkg/controller/endpoint/OWNERS
@@ -1,5 +1,4 @@
 reviewers:
-- bprashanth
 - bowei
 - MrHohn
 - thockin
diff --git a/pkg/controller/history/OWNERS b/pkg/controller/history/OWNERS
index 4ff17cf2c72..389a4766b63 100755
--- a/pkg/controller/history/OWNERS
+++ b/pkg/controller/history/OWNERS
@@ -1,5 +1,4 @@
 approvers:
-- bprashanth
 - enisoc
 - foxish
 - janetkuo
@@ -7,7 +6,6 @@ approvers:
 - kow3ns
 - smarterclayton
 reviewers:
-- bprashanth
 - enisoc
 - foxish
 - janetkuo
diff --git a/pkg/controller/replicaset/OWNERS b/pkg/controller/replicaset/OWNERS
index 34b609fb948..cd1992c3e67 100755
--- a/pkg/controller/replicaset/OWNERS
+++ b/pkg/controller/replicaset/OWNERS
@@ -2,9 +2,7 @@ approvers:
 - caesarxuchao
 - kargakis
 - lavalamp
-- bprashanth
 reviewers:
 - caesarxuchao
 - kargakis
 - lavalamp
-- bprashanth
diff --git a/pkg/controller/replication/OWNERS b/pkg/controller/replication/OWNERS
index 34b609fb948..cd1992c3e67 100755
--- a/pkg/controller/replication/OWNERS
+++ b/pkg/controller/replication/OWNERS
@@ -2,9 +2,7 @@ approvers:
 - caesarxuchao
 - kargakis
 - lavalamp
-- bprashanth
 reviewers:
 - caesarxuchao
 - kargakis
 - lavalamp
-- bprashanth
diff --git a/pkg/controller/service/OWNERS b/pkg/controller/service/OWNERS
index 844240a9b29..5e99c8ba0f8 100644
--- a/pkg/controller/service/OWNERS
+++ b/pkg/controller/service/OWNERS
@@ -1,5 +1,4 @@
 reviewers:
-- bprashanth
 - bowei
 - MrHohn
 - thockin
diff --git a/pkg/controller/statefulset/OWNERS b/pkg/controller/statefulset/OWNERS
index 4ff17cf2c72..389a4766b63 100755
--- a/pkg/controller/statefulset/OWNERS
+++ b/pkg/controller/statefulset/OWNERS
@@ -1,5 +1,4 @@
 approvers:
-- bprashanth
 - enisoc
 - foxish
 - janetkuo
@@ -7,7 +6,6 @@ approvers:
 - kow3ns
 - smarterclayton
 reviewers:
-- bprashanth
 - enisoc
 - foxish
 - janetkuo
diff --git a/pkg/credentialprovider/azure/BUILD b/pkg/credentialprovider/azure/BUILD
index 4f52afc4d36..aacd43a55aa 100644
--- a/pkg/credentialprovider/azure/BUILD
+++ b/pkg/credentialprovider/azure/BUILD
@@ -17,11 +17,9 @@ go_library(
         "//pkg/credentialprovider:go_default_library",
         "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
-        "//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
         "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/github.com/spf13/pflag:go_default_library",
-        "//vendor/gopkg.in/yaml.v2:go_default_library",
     ],
 )
diff --git a/pkg/credentialprovider/azure/azure_credentials.go b/pkg/credentialprovider/azure/azure_credentials.go
index bdf618b96d3..257cbee5b81 100644
--- a/pkg/credentialprovider/azure/azure_credentials.go
+++ b/pkg/credentialprovider/azure/azure_credentials.go
@@ -17,14 +17,12 @@ limitations under the License.
 package azure
 
 import (
-	"io/ioutil"
+	"io"
+	"os"
 	"time"
 
-	yaml "gopkg.in/yaml.v2"
-
 	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
 	"github.com/Azure/go-autorest/autorest"
-	"github.com/Azure/go-autorest/autorest/adal"
 	azureapi "github.com/Azure/go-autorest/autorest/azure"
 	"github.com/golang/glog"
 	"github.com/spf13/pflag"
@@ -47,10 +45,12 @@ func init() {
 	})
 }
 
+// RegistriesClient is a testable interface for the ACR client List operation.
 type RegistriesClient interface {
 	List() (containerregistry.RegistryListResult, error)
 }
 
+// NewACRProvider parses the specified configFile and returns a DockerConfigProvider
 func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider {
 	return &acrProvider{
 		file: configFile,
@@ -59,24 +59,16 @@ func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider
 
 type acrProvider struct {
 	file           *string
-	config         azure.Config
-	environment    azureapi.Environment
+	config         *azure.Config
+	environment    *azureapi.Environment
 	registryClient RegistriesClient
 }
 
-func (a *acrProvider) loadConfig(contents []byte) error {
-	err := yaml.Unmarshal(contents, &a.config)
+func (a *acrProvider) loadConfig(rdr io.Reader) error {
+	var err error
+	a.config, a.environment, err = azure.ParseConfig(rdr)
 	if err != nil {
-		return err
-	}
-
-	if a.config.Cloud == "" {
-		a.environment = azureapi.PublicCloud
-	} else {
-		a.environment, err = azureapi.EnvironmentFromName(a.config.Cloud)
-		if err != nil {
-			return err
-		}
+		glog.Errorf("Failed to load azure credential file: %v", err)
 	}
-	return nil
+	return err
 }
 
@@ -86,27 +78,21 @@ func (a *acrProvider) Enabled() bool {
 		glog.V(5).Infof("Azure config unspecified, disabling")
 		return false
 	}
-	contents, err := ioutil.ReadFile(*a.file)
+
+	f, err := os.Open(*a.file)
 	if err != nil {
-		glog.Errorf("Failed to load azure credential file: %v", err)
+		glog.Errorf("Failed to open config file: %s", *a.file)
 		return false
 	}
-	if err := a.loadConfig(contents); err != nil {
-		glog.Errorf("Failed to parse azure credential file: %v", err)
+	defer f.Close()
+
+	err = a.loadConfig(f)
+	if err != nil {
+		glog.Errorf("Failed to parse config file: %s", *a.file)
 		return false
 	}
-	oauthConfig, err := adal.NewOAuthConfig(a.environment.ActiveDirectoryEndpoint, a.config.TenantID)
-	if err != nil {
-		glog.Errorf("Failed to 
get oauth config: %v", err)
-		return false
-	}
-
-	servicePrincipalToken, err := adal.NewServicePrincipalToken(
-		*oauthConfig,
-		a.config.AADClientID,
-		a.config.AADClientSecret,
-		a.environment.ServiceManagementEndpoint)
+	servicePrincipalToken, err := azure.GetServicePrincipalToken(a.config, a.environment)
 	if err != nil {
 		glog.Errorf("Failed to create service principal token: %v", err)
 		return false
diff --git a/pkg/credentialprovider/azure/azure_credentials_test.go b/pkg/credentialprovider/azure/azure_credentials_test.go
index 8f697387768..9d966fe6be5 100644
--- a/pkg/credentialprovider/azure/azure_credentials_test.go
+++ b/pkg/credentialprovider/azure/azure_credentials_test.go
@@ -17,6 +17,7 @@ limitations under the License.
 package azure
 
 import (
+	"bytes"
 	"testing"
 
 	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
@@ -66,7 +67,7 @@ func Test(t *testing.T) {
 	provider := &acrProvider{
 		registryClient: fakeClient,
 	}
-	provider.loadConfig([]byte(configStr))
+	provider.loadConfig(bytes.NewBufferString(configStr))
 
 	creds := provider.Provide()
 
diff --git a/pkg/kubectl/cmd/apply.go b/pkg/kubectl/cmd/apply.go
index 669f5f1e138..ba3192c37bd 100644
--- a/pkg/kubectl/cmd/apply.go
+++ b/pkg/kubectl/cmd/apply.go
@@ -56,6 +56,7 @@ type ApplyOptions struct {
 	GracePeriod    int
 	PruneResources []pruneResource
 	Timeout        time.Duration
+	cmdBaseName    string
 }
 
 const (
@@ -65,8 +66,6 @@ const (
 	backOffPeriod = 1 * time.Second
 	// how many times we can retry before back off
 	triesBeforeBackOff = 1
-
-	warningNoLastAppliedConfigAnnotation = "Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\n"
 )
 
 var (
@@ -92,11 +91,17 @@ var (
 
 		# Apply the configuration in manifest.yaml and delete all the other configmaps that are not in the file.
 		kubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap`))
+
+	warningNoLastAppliedConfigAnnotation = "Warning: %[1]s apply should be used on resources created by either %[1]s create --save-config or %[1]s apply\n"
 )
 
-func NewCmdApply(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
+func NewCmdApply(baseName string, f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
 	var options ApplyOptions
 
+	// Store baseName for use in printing warnings / messages involving the base command name.
+	// This is useful for downstream commands that wrap this one.
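+	// e.g. a distribution that ships this command under a different name, say
+	// "oc", would pass baseName="oc" so the warning reads "oc apply ..." (the
+	// name here is purely illustrative).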
+ options.cmdBaseName = baseName + cmd := &cobra.Command{ Use: "apply -f FILENAME", Short: i18n.T("Apply a configuration to a resource by filename or stdin"), @@ -299,7 +304,7 @@ func RunApply(f cmdutil.Factory, cmd *cobra.Command, out, errOut io.Writer, opti return err } if _, ok := annotationMap[api.LastAppliedConfigAnnotation]; !ok { - fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation) + fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation, options.cmdBaseName) } overwrite := cmdutil.GetFlagBool(cmd, "overwrite") helper := resource.NewHelper(info.Client, info.Mapping) diff --git a/pkg/kubectl/cmd/apply_test.go b/pkg/kubectl/cmd/apply_test.go index ef0f9c394da..e166edae0c8 100644 --- a/pkg/kubectl/cmd/apply_test.go +++ b/pkg/kubectl/cmd/apply_test.go @@ -47,7 +47,7 @@ func TestApplyExtraArgsFail(t *testing.T) { errBuf := bytes.NewBuffer([]byte{}) f, _, _, _ := cmdtesting.NewAPIFactory() - c := NewCmdApply(f, buf, errBuf) + c := NewCmdApply("kubectl", f, buf, errBuf) if validateApplyArgs(c, []string{"rc"}) == nil { t.Fatalf("unexpected non-error") } @@ -377,14 +377,14 @@ func TestApplyObjectWithoutAnnotation(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) // uses the name from the file, not the response expectRC := "replicationcontroller/" + nameRC + "\n" - expectWarning := warningNoLastAppliedConfigAnnotation + expectWarning := fmt.Sprintf(warningNoLastAppliedConfigAnnotation, "kubectl") if errBuf.String() != expectWarning { t.Fatalf("unexpected non-warning: %s\nexpected: %s", errBuf.String(), expectWarning) } @@ -422,7 +422,7 @@ func TestApplyObject(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -479,7 +479,7 @@ func TestApplyObjectOutput(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "yaml") cmd.Run(cmd, []string{}) @@ -533,7 +533,7 @@ func TestApplyRetry(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -578,7 +578,7 @@ func TestApplyNonExistObject(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameRC) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -636,7 +636,7 @@ func testApplyMultipleObjects(t *testing.T, asList bool) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) if asList { cmd.Flags().Set("filename", filenameRCSVC) } else { @@ -729,7 +729,7 @@ func TestApplyNULLPreservation(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) 
cmd.Flags().Set("filename", filenameDeployObjClientside) cmd.Flags().Set("output", "name") @@ -789,7 +789,7 @@ func TestUnstructuredApply(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameWidgetClientside) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) @@ -876,7 +876,7 @@ func TestUnstructuredIdempotentApply(t *testing.T) { buf := bytes.NewBuffer([]byte{}) errBuf := bytes.NewBuffer([]byte{}) - cmd := NewCmdApply(f, buf, errBuf) + cmd := NewCmdApply("kubectl", f, buf, errBuf) cmd.Flags().Set("filename", filenameWidgetClientside) cmd.Flags().Set("output", "name") cmd.Run(cmd, []string{}) diff --git a/pkg/kubectl/cmd/cmd.go b/pkg/kubectl/cmd/cmd.go index 3d61edd280f..51004668c24 100644 --- a/pkg/kubectl/cmd/cmd.go +++ b/pkg/kubectl/cmd/cmd.go @@ -337,7 +337,7 @@ func NewKubectlCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob { Message: "Advanced Commands:", Commands: []*cobra.Command{ - NewCmdApply(f, out, err), + NewCmdApply("kubectl", f, out, err), NewCmdPatch(f, out), NewCmdReplace(f, out), deprecatedAlias("update", NewCmdReplace(f, out)), diff --git a/pkg/kubectl/cmd/version.go b/pkg/kubectl/cmd/version.go index 3447524ff34..799db251e97 100644 --- a/pkg/kubectl/cmd/version.go +++ b/pkg/kubectl/cmd/version.go @@ -37,6 +37,14 @@ type Version struct { ServerVersion *apimachineryversion.Info `json:"serverVersion,omitempty" yaml:"serverVersion,omitempty"` } +// VersionOptions: describe the options available to users of the "kubectl +// version" command. +type VersionOptions struct { + clientOnly bool + short bool + output string +} + var ( versionExample = templates.Examples(i18n.T(` # Print the client and server versions for the current context @@ -50,66 +58,19 @@ func NewCmdVersion(f cmdutil.Factory, out io.Writer) *cobra.Command { Long: "Print the client and server version information for the current context", Example: versionExample, Run: func(cmd *cobra.Command, args []string) { - err := RunVersion(f, out, cmd) - cmdutil.CheckErr(err) + options := new(VersionOptions) + cmdutil.CheckErr(options.Complete(cmd)) + cmdutil.CheckErr(options.Validate()) + cmdutil.CheckErr(options.Run(f, out)) }, } cmd.Flags().BoolP("client", "c", false, "Client version only (no server required).") cmd.Flags().BoolP("short", "", false, "Print just the version number.") - cmd.Flags().String("output", "", "output format, options available are yaml and json") + cmd.Flags().String("output", "", "one of 'yaml' or 'json'") cmd.Flags().MarkShorthandDeprecated("client", "please use --client instead.") return cmd } -func RunVersion(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error { - var serverVersion *apimachineryversion.Info = nil - var serverErr error = nil - vo := Version{nil, nil} - - clientVersion := version.Get() - vo.ClientVersion = &clientVersion - - if !cmdutil.GetFlagBool(cmd, "client") { - serverVersion, serverErr = retrieveServerVersion(f) - vo.ServerVersion = serverVersion - } - - switch of := cmdutil.GetFlagString(cmd, "output"); of { - case "": - if cmdutil.GetFlagBool(cmd, "short") { - fmt.Fprintf(out, "Client Version: %s\n", clientVersion.GitVersion) - - if serverVersion != nil { - fmt.Fprintf(out, "Server Version: %s\n", serverVersion.GitVersion) - } - } else { - fmt.Fprintf(out, "Client Version: %s\n", fmt.Sprintf("%#v", clientVersion)) - - if serverVersion != nil { - fmt.Fprintf(out, "Server Version: %s\n", 
fmt.Sprintf("%#v", *serverVersion)) - } - } - case "yaml": - y, err := yaml.Marshal(&vo) - if err != nil { - return err - } - - fmt.Fprintln(out, string(y)) - case "json": - y, err := json.Marshal(&vo) - if err != nil { - return err - } - fmt.Fprintln(out, string(y)) - default: - return errors.New("invalid output format: " + of) - - } - - return serverErr -} - func retrieveServerVersion(f cmdutil.Factory) (*apimachineryversion.Info, error) { discoveryClient, err := f.DiscoveryClient() if err != nil { @@ -120,3 +81,67 @@ func retrieveServerVersion(f cmdutil.Factory) (*apimachineryversion.Info, error) discoveryClient.Invalidate() return discoveryClient.ServerVersion() } + +func (o *VersionOptions) Run(f cmdutil.Factory, out io.Writer) error { + var ( + serverVersion *apimachineryversion.Info + serverErr error + versionInfo Version + ) + + clientVersion := version.Get() + versionInfo.ClientVersion = &clientVersion + + if !o.clientOnly { + serverVersion, serverErr = retrieveServerVersion(f) + versionInfo.ServerVersion = serverVersion + } + + switch o.output { + case "": + if o.short { + fmt.Fprintf(out, "Client Version: %s\n", clientVersion.GitVersion) + if serverVersion != nil { + fmt.Fprintf(out, "Server Version: %s\n", serverVersion.GitVersion) + } + } else { + fmt.Fprintf(out, "Client Version: %s\n", fmt.Sprintf("%#v", clientVersion)) + if serverVersion != nil { + fmt.Fprintf(out, "Server Version: %s\n", fmt.Sprintf("%#v", *serverVersion)) + } + } + case "yaml": + marshalled, err := yaml.Marshal(&versionInfo) + if err != nil { + return err + } + fmt.Fprintln(out, string(marshalled)) + case "json": + marshalled, err := json.Marshal(&versionInfo) + if err != nil { + return err + } + fmt.Fprintln(out, string(marshalled)) + default: + // There is a bug in the program if we hit this case. + // However, we follow a policy of never panicking. 
+ return fmt.Errorf("VersionOptions were not validated: --output=%q should have been rejected", o.output) + } + + return serverErr +} + +func (o *VersionOptions) Complete(cmd *cobra.Command) error { + o.clientOnly = cmdutil.GetFlagBool(cmd, "client") + o.short = cmdutil.GetFlagBool(cmd, "short") + o.output = cmdutil.GetFlagString(cmd, "output") + return nil +} + +func (o *VersionOptions) Validate() error { + if o.output != "" && o.output != "yaml" && o.output != "json" { + return errors.New(`--output must be 'yaml' or 'json'`) + } + + return nil +} diff --git a/pkg/master/OWNERS b/pkg/master/OWNERS index ec262bf10cd..739cc9ee46a 100644 --- a/pkg/master/OWNERS +++ b/pkg/master/OWNERS @@ -18,7 +18,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/pkg/printers/printers.go b/pkg/printers/printers.go index 21775585293..c956170355b 100644 --- a/pkg/printers/printers.go +++ b/pkg/printers/printers.go @@ -91,7 +91,7 @@ func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.R case "jsonpath-file": if len(formatArgument) == 0 { - return nil, fmt.Errorf("jsonpath file format specified but no template file file given") + return nil, fmt.Errorf("jsonpath file format specified but no template file given") } data, err := ioutil.ReadFile(formatArgument) if err != nil { diff --git a/pkg/proxy/OWNERS b/pkg/proxy/OWNERS index 646b3c519b1..f1f0145ed00 100644 --- a/pkg/proxy/OWNERS +++ b/pkg/proxy/OWNERS @@ -1,6 +1,5 @@ approvers: - thockin -- bprashanth - matchstick reviewers: - thockin @@ -8,7 +7,6 @@ reviewers: - smarterclayton - brendandburns - vishh -- bprashanth - justinsb - freehan - dcbw diff --git a/pkg/proxy/config/OWNERS b/pkg/proxy/config/OWNERS index 22bdb502d2e..d9bd05962a3 100755 --- a/pkg/proxy/config/OWNERS +++ b/pkg/proxy/config/OWNERS @@ -3,5 +3,4 @@ reviewers: - lavalamp - smarterclayton - brendandburns -- bprashanth - freehan diff --git a/pkg/proxy/iptables/OWNERS b/pkg/proxy/iptables/OWNERS index 1430b9e2e81..d0dffc12400 100755 --- a/pkg/proxy/iptables/OWNERS +++ b/pkg/proxy/iptables/OWNERS @@ -1,7 +1,6 @@ reviewers: - thockin - smarterclayton -- bprashanth - justinsb - freehan - dcbw diff --git a/pkg/proxy/userspace/OWNERS b/pkg/proxy/userspace/OWNERS index 192b2a2b708..4988f45c4dd 100755 --- a/pkg/proxy/userspace/OWNERS +++ b/pkg/proxy/userspace/OWNERS @@ -3,4 +3,3 @@ reviewers: - lavalamp - smarterclayton - freehan -- bprashanth diff --git a/pkg/registry/OWNERS b/pkg/registry/OWNERS index 8b0120e42b6..978192dcac3 100644 --- a/pkg/registry/OWNERS +++ b/pkg/registry/OWNERS @@ -15,7 +15,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/pkg/registry/cachesize/OWNERS b/pkg/registry/cachesize/OWNERS index ab0dcba030e..aea0b7693d9 100755 --- a/pkg/registry/cachesize/OWNERS +++ b/pkg/registry/cachesize/OWNERS @@ -1,6 +1,5 @@ reviewers: - wojtek-t -- bprashanth - gmarek - soltysh - madhusudancs diff --git a/pkg/registry/registrytest/OWNERS b/pkg/registry/registrytest/OWNERS index ca660369ed4..2bfe9edfb7b 100755 --- a/pkg/registry/registrytest/OWNERS +++ b/pkg/registry/registrytest/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - pmorie diff --git a/pkg/volume/OWNERS b/pkg/volume/OWNERS index bbac9263cdc..7d90f372be0 100644 --- a/pkg/volume/OWNERS +++ b/pkg/volume/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff 
--git a/pkg/volume/azure_dd/BUILD b/pkg/volume/azure_dd/BUILD index b96669b020e..a60cb736fe8 100644 --- a/pkg/volume/azure_dd/BUILD +++ b/pkg/volume/azure_dd/BUILD @@ -12,12 +12,14 @@ go_library( name = "go_default_library", srcs = [ "attacher.go", + "azure_common.go", "azure_dd.go", + "azure_mounter.go", "azure_provision.go", - "vhd_util.go", ], tags = ["automanaged"], deps = [ + "//pkg/api:go_default_library", "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/azure:go_default_library", "//pkg/util/exec:go_default_library", @@ -26,37 +28,18 @@ go_library( "//pkg/util/strings:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/util:go_default_library", - "//pkg/volume/util/volumehelper:go_default_library", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library", "//vendor/github.com/golang/glog:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", + "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library", ], ) -go_test( - name = "go_default_test", - srcs = [ - "azure_dd_test.go", - "vhd_util_test.go", - ], - library = ":go_default_library", - tags = ["automanaged"], - deps = [ - "//pkg/util/exec:go_default_library", - "//pkg/util/mount:go_default_library", - "//pkg/volume:go_default_library", - "//pkg/volume/testing:go_default_library", - "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library", - "//vendor/k8s.io/api/core/v1:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/client-go/util/testing:go_default_library", - ], -) - filegroup( name = "package-srcs", srcs = glob(["**"]), @@ -69,3 +52,20 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = [ + "azure_common_test.go", + "azure_dd_test.go", + ], + library = ":go_default_library", + tags = ["automanaged"], + deps = [ + "//pkg/util/exec:go_default_library", + "//pkg/volume:go_default_library", + "//pkg/volume/testing:go_default_library", + "//vendor/k8s.io/api/core/v1:go_default_library", + "//vendor/k8s.io/client-go/util/testing:go_default_library", + ], +) diff --git a/pkg/volume/azure_dd/OWNERS b/pkg/volume/azure_dd/OWNERS index 51f8d0a1076..5cfad1f70cd 100755 --- a/pkg/volume/azure_dd/OWNERS +++ b/pkg/volume/azure_dd/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/azure_dd/attacher.go b/pkg/volume/azure_dd/attacher.go index 92cb203c468..40c2a1bac88 100644 --- a/pkg/volume/azure_dd/attacher.go +++ b/pkg/volume/azure_dd/attacher.go @@ -26,54 +26,43 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/golang/glog" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" "k8s.io/kubernetes/pkg/util/exec" "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" + volumeutil "k8s.io/kubernetes/pkg/volume/util" ) +type azureDiskDetacher struct { + plugin 
*azureDataDiskPlugin + cloud *azure.Cloud +} + type azureDiskAttacher struct { - host volume.VolumeHost - azureProvider azureCloudProvider + plugin *azureDataDiskPlugin + cloud *azure.Cloud } var _ volume.Attacher = &azureDiskAttacher{} - -var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{} - -const ( - checkSleepDuration = time.Second -) +var _ volume.Detacher = &azureDiskDetacher{} // acquire lock to get an lun number var getLunMutex = keymutex.NewKeyMutex() -// NewAttacher initializes an Attacher -func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - - return &azureDiskAttacher{ - host: plugin.host, - azureProvider: azure, - }, nil -} - // Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN -func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { +func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { glog.Warningf("failed to get azure disk spec") return "", err } - instanceid, err := attacher.azureProvider.InstanceID(nodeName) + + instanceid, err := a.cloud.InstanceID(nodeName) if err != nil { glog.Warningf("failed to get azure instance id") return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName) @@ -82,7 +71,12 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node instanceid = instanceid[(ind + 1):] } - lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) + diskController, err := getDiskController(a.plugin.host) + if err != nil { + return "", err + } + + lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName) if err == cloudprovider.InstanceNotFound { // Log error and continue with attach glog.Warningf( @@ -98,13 +92,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node getLunMutex.LockKey(instanceid) defer getLunMutex.UnlockKey(instanceid) - lun, err = attacher.azureProvider.GetNextDiskLun(nodeName) + lun, err = diskController.GetNextDiskLun(nodeName) if err != nil { glog.Warningf("no LUN available for instance %q", nodeName) return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid) } glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName) - err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode)) if err == nil { glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName) } else { @@ -116,14 +111,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node return strconv.Itoa(int(lun)), err } -func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) { +func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) 
(map[*volume.Spec]bool, error) { volumesAttachedCheck := make(map[*volume.Spec]bool) volumeSpecMap := make(map[string]*volume.Spec) volumeIDList := []string{} for _, spec := range specs { volumeSource, err := getVolumeSource(spec) if err != nil { - glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err) + glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err) continue } @@ -131,11 +126,16 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node volumesAttachedCheck[spec] = true volumeSpecMap[volumeSource.DiskName] = spec } - attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName) + + diskController, err := getDiskController(a.plugin.host) + if err != nil { + return nil, err + } + attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName) if err != nil { // Log error and continue with attach glog.Errorf( - "Error checking if volumes (%v) are attached to current node (%q). err=%v", + "azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v", volumeIDList, nodeName, err) return volumesAttachedCheck, err } @@ -144,71 +144,84 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node if !attached { spec := volumeSpecMap[volumeID] volumesAttachedCheck[spec] = false - glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) + glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name()) } } return volumesAttachedCheck, nil } -// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned -func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) { +func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) { + var err error + lun, err := strconv.Atoi(devicePath) + if err != nil { + return "", fmt.Errorf("azureDisk - WaitForAttach expects the device path to be a LUN number, instead got: %s", devicePath) + } + volumeSource, err := getVolumeSource(spec) if err != nil { return "", err } - if len(lunStr) == 0 { - return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName) - } + io := &osIOHandler{} + scsiHostRescan(io) - lun, err := strconv.Atoi(lunStr) - if err != nil { - return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err) - } - scsiHostRescan(&osIOHandler{}) - exe := exec.New() - devicePath := "" + diskName := volumeSource.DiskName + nodeName := a.plugin.host.GetHostName() + newDevicePath := "" - err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) { - glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr) - if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil { - if len(devicePath) == 0 { - glog.Warningf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr) - return false, fmt.Errorf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr) - } - glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath) - return true, nil - } else { - //Log error, if any, and continue checking periodically - glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v",
volumeSource.DiskName, err) - return false, nil + err = wait.Poll(1*time.Second, timeout, func() (bool, error) { + exe := exec.New() + + if newDevicePath, err = findDiskByLun(lun, io, exe); err != nil { + return false, fmt.Errorf("azureDisk - WaitForAttach poll failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err) } + + // did we find it? + if newDevicePath != "" { + // the current sequence k8s uses for an unformatted disk (check-disk, mount, fail, mkfs.extX) hangs on the + // Azure managed disk SCSI interface. This is a hack and will be replaced once we identify and solve + // the root cause on Azure. + formatIfNotFormatted(newDevicePath, *volumeSource.FSType) + return true, nil + } + + // the disk has not shown up yet; return (false, nil) so wait.Poll retries until the timeout + return false, nil }) - return devicePath, err + + if err == wait.ErrWaitTimeout { + err = fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun) + } + + return newDevicePath, err } -// GetDeviceMountPath finds the volume's mount path on the node -func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { +// To avoid name conflicts (similar *.vhd names) we hash the diskUri and use +// the hash as the device mount target. This is generalized for both managed +// and blob disks; we also prefix the hash with m/b based on the disk kind. +func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) { volumeSource, err := getVolumeSource(spec) if err != nil { return "", err } - return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil + if volumeSource.Kind == nil { // this spec was constructed from info on the node + pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI) + return pdPath, nil + } + + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk) } -// MountDevice runs mount command on the node to mount the volume func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error { - mounter := attacher.host.GetMounter() + mounter := attacher.plugin.host.GetMounter() notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath) + if err != nil { if os.IsNotExist(err) { if err := os.MkdirAll(deviceMountPath, 0750); err != nil { - return err + return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err) } notMnt = true } else { - return err + return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err) } } @@ -218,47 +231,27 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str } options := []string{} - if spec.ReadOnly { - options = append(options, "ro") - } if notMnt { diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()} mountOptions := volume.MountOptionFromSpec(spec, options...)
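The rewritten WaitForAttach treats its devicePath argument as a LUN number and polls until the disk surfaces under /dev. A minimal runnable sketch of the same wait.Poll contract, with a hypothetical findDevice standing in for findDiskByLun: return (true, nil) to stop, (false, nil) to retry, and a non-nil error only for permanent failures, since any error aborts the poll immediately.

package main

import (
	"fmt"
	"os"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// findDevice is a hypothetical stand-in for findDiskByLun: it reports the
// device path for a LUN, or "" while udev has not surfaced the disk yet.
func findDevice(lun int) (string, error) {
	p := fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", lun) // illustrative path
	if _, err := os.Stat(p); err == nil {
		return p, nil
	}
	return "", nil
}

// waitForLun polls once per second until the device appears or the timeout
// expires, mirroring the wait.Poll loop in WaitForAttach.
func waitForLun(lun int, timeout time.Duration) (string, error) {
	devicePath := ""
	err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
		p, ferr := findDevice(lun)
		if ferr != nil {
			return false, ferr // a non-nil error aborts the poll immediately
		}
		devicePath = p
		return devicePath != "", nil // (false, nil) means retry
	})
	return devicePath, err
}

func main() {
	if p, err := waitForLun(2, 10*time.Second); err == nil {
		fmt.Println("found device at", p)
	} else {
		fmt.Println("wait failed:", err)
	}
}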
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions) if err != nil { - os.Remove(deviceMountPath) - return err + if cleanErr := os.Remove(deviceMountPath); cleanErr != nil { + return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr) + } + return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err) } } return nil } -type azureDiskDetacher struct { - mounter mount.Interface - azureProvider azureCloudProvider -} - -var _ volume.Detacher = &azureDiskDetacher{} - -// NewDetacher initializes a volume Detacher -func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - return nil, err - } - - return &azureDiskDetacher{ - mounter: plugin.host.GetMounter(), - azureProvider: azure, - }, nil -} - // Detach detaches disk from Azure VM. -func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error { - if diskName == "" { - return fmt.Errorf("invalid disk to detach: %q", diskName) +func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error { + if diskURI == "" { + return fmt.Errorf("invalid disk to detach: %q", diskURI) } - instanceid, err := detacher.azureProvider.InstanceID(nodeName) + + instanceid, err := d.cloud.InstanceID(nodeName) if err != nil { glog.Warningf("no instance id for node %q, skip detaching", nodeName) return nil @@ -267,22 +260,28 @@ func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeNa instanceid = instanceid[(ind + 1):] } - glog.V(4).Infof("detach %v from node %q", diskName, nodeName) - err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName) + glog.V(4).Infof("detach %v from node %q", diskURI, nodeName) + + diskController, err := getDiskController(d.plugin.host) if err != nil { - glog.Errorf("failed to detach azure disk %q, err %v", diskName, err) + return err + } + err = diskController.DetachDiskByName("", diskURI, nodeName) + if err != nil { + glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err) } + glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName) return err } // UnmountDevice unmounts the volume on the node func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error { - volume := path.Base(deviceMountPath) - if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil { - glog.Errorf("Error unmounting %q: %v", volume, err) - return err + err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter()) + if err == nil { + glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath) } else { - return nil + glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error()) } + return err } diff --git a/pkg/volume/azure_dd/azure_common.go b/pkg/volume/azure_dd/azure_common.go new file mode 100644 index 00000000000..637b75a7959 --- /dev/null +++ b/pkg/volume/azure_dd/azure_common.go @@ -0,0 +1,342 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "regexp" + "strconv" + libstrings "strings" + + storage "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" + "k8s.io/kubernetes/pkg/util/exec" + "k8s.io/kubernetes/pkg/util/mount" + "k8s.io/kubernetes/pkg/util/strings" + "k8s.io/kubernetes/pkg/volume" +) + +const ( + defaultFSType = "ext4" + defaultStorageAccountType = storage.StandardLRS +) + +type dataDisk struct { + volume.MetricsProvider + volumeName string + diskName string + podUID types.UID +} + +var ( + supportedCachingModes = sets.NewString( + string(api.AzureDataDiskCachingNone), + string(api.AzureDataDiskCachingReadOnly), + string(api.AzureDataDiskCachingReadWrite)) + + supportedDiskKinds = sets.NewString( + string(api.AzureSharedBlobDisk), + string(api.AzureDedicatedBlobDisk), + string(api.AzureManagedDisk)) + + supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS") +) + +func getPath(uid types.UID, volName string, host volume.VolumeHost) string { + return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName) +} + +// creates a unique path for disks (even if they share the same *.vhd name) +func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) { + diskUri = libstrings.ToLower(diskUri) // always lower uri because users may enter it in caps. 
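The supported* sets defined above drive the normalize helpers that follow. A small runnable sketch of the sets.String validation pattern, with illustrative names and defaults:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// supportedSkus mirrors supportedStorageAccountTypes above; the default and
// error text here are illustrative only.
var supportedSkus = sets.NewString("Premium_LRS", "Standard_LRS")

// normalizeSku follows the shape of normalizeStorageAccountType: empty input
// falls back to a default, anything else must be in the allowed set.
func normalizeSku(sku string) (string, error) {
	if sku == "" {
		return "Standard_LRS", nil
	}
	if !supportedSkus.Has(sku) {
		return "", fmt.Errorf("%s is not a supported sku, supported values are %v", sku, supportedSkus.List())
	}
	return sku, nil
}

func main() {
	fmt.Println(normalizeSku(""))            // Standard_LRS <nil>
	fmt.Println(normalizeSku("Premium_LRS")) // Premium_LRS <nil>
	fmt.Println(normalizeSku("bogus"))       // error listing the valid values
}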
+ uniqueDiskNameTemplate := "%s%s" + hashedDiskUri := azure.MakeCRC32(diskUri) + prefix := "b" + if isManaged { + prefix = "m" + } + // "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }" + diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri) + pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName) + + return pdPath, nil +} + +func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk { + var metricProvider volume.MetricsProvider + if podUID != "" { + metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host)) + } + + return &dataDisk{ + MetricsProvider: metricProvider, + volumeName: volumeName, + diskName: diskName, + podUID: podUID, + } +} + +func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { + if spec.Volume != nil && spec.Volume.AzureDisk != nil { + return spec.Volume.AzureDisk, nil + } + + if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { + return spec.PersistentVolume.Spec.AzureDisk, nil + } + + return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type") +} + +func normalizeFsType(fsType string) string { + if fsType == "" { + return defaultFSType + } + + return fsType +} + +func normalizeKind(kind string) (v1.AzureDataDiskKind, error) { + if kind == "" { + return v1.AzureDedicatedBlobDisk, nil + } + + if !supportedDiskKinds.Has(kind) { + return "", fmt.Errorf("azureDisk - %s is not a supported disk kind. Supported values are %s", kind, supportedDiskKinds.List()) + } + + return v1.AzureDataDiskKind(kind), nil +} + +func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) { + if storageAccountType == "" { + return defaultStorageAccountType, nil + } + + if !supportedStorageAccountTypes.Has(storageAccountType) { + return "", fmt.Errorf("azureDisk - %s is not a supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List()) + } + + return storage.SkuName(storageAccountType), nil +} + +func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) { + if cachingMode == "" { + return v1.AzureDataDiskCachingReadWrite, nil + } + + if !supportedCachingModes.Has(string(cachingMode)) { + return "", fmt.Errorf("azureDisk - %s is not a supported cachingmode. 
Supported values are %s", cachingMode, supportedCachingModes.List()) + } + + return cachingMode, nil +} + +type ioHandler interface { + ReadDir(dirname string) ([]os.FileInfo, error) + WriteFile(filename string, data []byte, perm os.FileMode) error + Readlink(name string) (string, error) +} + +//TODO: check if priming the iscsi interface is actually needed + +type osIOHandler struct{} + +func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { + return ioutil.ReadDir(dirname) +} + +func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { + return ioutil.WriteFile(filename, data, perm) +} + +func (handler *osIOHandler) Readlink(name string) (string, error) { + return os.Readlink(name) +} + +// exclude those used by azure as resource and OS root in /dev/disk/azure +func listAzureDiskPath(io ioHandler) []string { + azureDiskPath := "/dev/disk/azure/" + var azureDiskList []string + if dirs, err := io.ReadDir(azureDiskPath); err == nil { + for _, f := range dirs { + name := f.Name() + diskPath := azureDiskPath + name + if link, linkErr := io.Readlink(diskPath); linkErr == nil { + sd := link[(libstrings.LastIndex(link, "/") + 1):] + azureDiskList = append(azureDiskList, sd) + } + } + } + glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) + return azureDiskList +} + +func scsiHostRescan(io ioHandler) { + scsi_path := "/sys/class/scsi_host/" + if dirs, err := io.ReadDir(scsi_path); err == nil { + for _, f := range dirs { + name := scsi_path + f.Name() + "/scan" + data := []byte("- - -") + if err = io.WriteFile(name, data, 0666); err != nil { + glog.Warningf("failed to rescan scsi host %s", name) + } + } + } else { + glog.Warningf("failed to read %s, err %v", scsi_path, err) + } +} + +func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) { + azureDisks := listAzureDiskPath(io) + return findDiskByLunWithConstraint(lun, io, exe, azureDisks) +} + +// finds a device mounted to "current" node +func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) { + var err error + sys_path := "/sys/bus/scsi/devices" + if dirs, err := io.ReadDir(sys_path); err == nil { + for _, f := range dirs { + name := f.Name() + // look for path like /sys/bus/scsi/devices/3:0:0:1 + arr := libstrings.Split(name, ":") + if len(arr) < 4 { + continue + } + // extract LUN from the path. + // LUN is the last index of the array, i.e. 
1 in /sys/bus/scsi/devices/3:0:0:1 + l, err := strconv.Atoi(arr[3]) + if err != nil { + // unknown path format, continue to read the next one + glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err) + continue + } + if lun == l { + // found the matching LUN + // read vendor and model to ensure it is a VHD disk + vendor := path.Join(sys_path, name, "vendor") + model := path.Join(sys_path, name, "model") + out, err := exe.Command("cat", vendor, model).CombinedOutput() + if err != nil { + glog.V(4).Infof("azure disk - failed to cat device vendor and model, err: %v", err) + continue + } + matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", libstrings.ToUpper(string(out))) + if err != nil || !matched { + glog.V(4).Infof("azure disk - doesn't match VHD, output %v, error %v", string(out), err) + continue + } + // found a disk; validate its name + dir := path.Join(sys_path, name, "block") + if dev, err := io.ReadDir(dir); err == nil { + found := false + for _, diskName := range azureDisks { + glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName) + if string(dev[0].Name()) == diskName { + found = true + break + } + } + if !found { + return "/dev/" + dev[0].Name(), nil + } + } + } + } + } + return "", err +} + +func formatIfNotFormatted(disk string, fstype string) { + notFormatted, err := diskLooksUnformatted(disk) + if err == nil && notFormatted { + args := []string{disk} + // Disk is unformatted so format it. + // Use 'ext4' as the default + if len(fstype) == 0 { + fstype = "ext4" + } + if fstype == "ext4" || fstype == "ext3" { + args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk} + } + glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args) + runner := exec.New() + cmd := runner.Command("mkfs."+fstype, args...) + _, err := cmd.CombinedOutput() + if err == nil { + // the disk has been formatted successfully, try to mount it again. + glog.Infof("azureDisk - Disk successfully formatted (mkfs): %s - %s", fstype, disk) + } else { + glog.Warningf("azureDisk - format of disk %q failed: type:(%q) error:(%v)", disk, fstype, err) + } + } else { + if err != nil { + glog.Warningf("azureDisk - Failed to check if the disk %s is formatted, error %s; will attach anyway", disk, err) + } else { + glog.Infof("azureDisk - Disk %s already formatted, will not format", disk) + } + } +} + +func diskLooksUnformatted(disk string) (bool, error) { + args := []string{"-nd", "-o", "FSTYPE", disk} + runner := exec.New() + cmd := runner.Command("lsblk", args...) + glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) + dataOut, err := cmd.CombinedOutput() + if err != nil { + glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err) + return false, err + } + output := libstrings.TrimSpace(string(dataOut)) + return output == "", nil +} + +func getDiskController(host volume.VolumeHost) (DiskController, error) { + cloudProvider := host.GetCloudProvider() + az, ok := cloudProvider.(*azure.Cloud) + + if !ok || az == nil { + return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. 
GetCloudProvider returned %v instead", cloudProvider) + } + return az, nil +} + +func getCloud(host volume.VolumeHost) (*azure.Cloud, error) { + cloudProvider := host.GetCloudProvider() + az, ok := cloudProvider.(*azure.Cloud) + + if !ok || az == nil { + return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) + } + return az, nil +} + +func strFirstLetterToUpper(str string) string { + if len(str) < 2 { + return str + } + return libstrings.ToUpper(string(str[0])) + str[1:] +} diff --git a/pkg/volume/azure_dd/vhd_util_test.go b/pkg/volume/azure_dd/azure_common_test.go similarity index 98% rename from pkg/volume/azure_dd/vhd_util_test.go rename to pkg/volume/azure_dd/azure_common_test.go index 93c76721778..b0f4988a9e4 100644 --- a/pkg/volume/azure_dd/vhd_util_test.go +++ b/pkg/volume/azure_dd/azure_common_test.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/volume/azure_dd/azure_dd.go b/pkg/volume/azure_dd/azure_dd.go index 20464dc217a..49b68cdd43a 100644 --- a/pkg/volume/azure_dd/azure_dd.go +++ b/pkg/volume/azure_dd/azure_dd.go @@ -17,67 +17,62 @@ limitations under the License. package azure_dd import ( - "fmt" - "os" - "path" - "github.com/Azure/azure-sdk-for-go/arm/compute" - + storage "github.com/Azure/azure-sdk-for-go/arm/storage" "github.com/golang/glog" - "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/cloudprovider" - "k8s.io/kubernetes/pkg/cloudprovider/providers/azure" - "k8s.io/kubernetes/pkg/util/exec" - "k8s.io/kubernetes/pkg/util/keymutex" "k8s.io/kubernetes/pkg/util/mount" - utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util" ) -// This is the primary entrypoint for volume plugins. -func ProbeVolumePlugins() []volume.VolumePlugin { - return []volume.VolumePlugin{&azureDataDiskPlugin{}} -} +// interface exposed by the cloud provider implementing Disk functionality +type DiskController interface { + CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) + DeleteBlobDisk(diskUri string, wasForced bool) error -type azureDataDiskPlugin struct { - host volume.VolumeHost - volumeLocks keymutex.KeyMutex -} + CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) + DeleteManagedDisk(diskURI string) error -// Abstract interface to disk operations. -// azure cloud provider should implement it -type azureCloudProvider interface { // Attaches the disk to the host machine. - AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error + AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error // Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error + // Check if a list of volumes are attached to the node with the specified NodeName DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) + // Get the LUN number of the disk that is attached to the host GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error) // Get the next available LUN number to attach a new VHD GetNextDiskLun(nodeName types.NodeName) (int32, error) - // InstanceID returns the cloud provider ID of the specified instance. - InstanceID(nodeName types.NodeName) (string, error) + // Create a VHD blob - CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) + CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) // Delete a VHD blob - DeleteVolume(name, uri string) error + DeleteVolume(diskURI string) error +} + +type azureDataDiskPlugin struct { + host volume.VolumeHost } var _ volume.VolumePlugin = &azureDataDiskPlugin{} var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{} +var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} +var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{} const ( azureDataDiskPluginName = "kubernetes.io/azure-disk" ) +func ProbeVolumePlugins() []volume.VolumePlugin { + return []volume.VolumePlugin{&azureDataDiskPlugin{}} +} + func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error { plugin.host = host - plugin.volumeLocks = keymutex.NewKeyMutex() return nil } @@ -91,7 +86,7 @@ func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, err return "", err } - return volumeSource.DiskName, nil + return volumeSource.DataDiskURI, nil } func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool { @@ -117,281 +112,104 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM } } -func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) { - return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter()) -} - -func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) { - // azures used directly in a pod have a ReadOnly flag set by the pod author. 
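The var _ assignments in this file (for example var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}) are compile-time interface assertions. A tiny runnable sketch of the idiom, using an invented Greeter interface:

package main

import "fmt"

// Greeter is an invented interface standing in for volume.VolumePlugin,
// volume.Attacher, and the other plugin interfaces asserted above.
type Greeter interface {
	Greet() string
}

type englishGreeter struct{}

func (g *englishGreeter) Greet() string { return "hello" }

// The blank-identifier assignment costs nothing at runtime but makes the
// build fail if *englishGreeter ever stops satisfying Greeter, exactly the
// guarantee the var _ lines give the azure_dd plugin types.
var _ Greeter = &englishGreeter{}

func main() {
	fmt.Println((&englishGreeter{}).Greet())
}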
- // azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV - azure, err := getVolumeSource(spec) +// NewAttacher initializes an Attacher +func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) { + azure, err := getCloud(plugin.host) if err != nil { + glog.V(4).Infof("failed to get azure cloud in NewAttacher, plugin.host : %s", plugin.host.GetHostName()) return nil, err } - fsType := "ext4" - if azure.FSType != nil { - fsType = *azure.FSType - } - cachingMode := v1.AzureDataDiskCachingNone - if azure.CachingMode != nil { - cachingMode = *azure.CachingMode - } - readOnly := false - if azure.ReadOnly != nil { - readOnly = *azure.ReadOnly - } - diskName := azure.DiskName - diskUri := azure.DataDiskURI - return &azureDiskMounter{ - azureDisk: &azureDisk{ - podUID: podUID, - volName: spec.Name(), - diskName: diskName, - diskUri: diskUri, - cachingMode: cachingMode, - mounter: mounter, - plugin: plugin, - }, - fsType: fsType, - readOnly: readOnly, - diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil -} -func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { - return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter()) -} - -func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) { - return &azureDiskUnmounter{ - &azureDisk{ - podUID: podUID, - volName: volName, - mounter: mounter, - plugin: plugin, - }, + return &azureDiskAttacher{ + plugin: plugin, + cloud: azure, }, nil } -func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) { - mounter := plugin.host.GetMounter() - pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) - sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) +func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) { + azure, err := getCloud(plugin.host) + if err != nil { + glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName()) + return nil, err + } + + return &azureDiskDetacher{ + plugin: plugin, + cloud: azure, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { + volumeSource, err := getVolumeSource(spec) if err != nil { return nil, err } - azVolume := &v1.Volume{ - Name: volName, + + disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host) + + return &azureDiskDeleter{ + spec: spec, + plugin: plugin, + dataDisk: disk, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { + if len(options.PVC.Spec.AccessModes) == 0 { + options.PVC.Spec.AccessModes = plugin.GetAccessModes() + } + + return &azureDiskProvisioner{ + plugin: plugin, + options: options, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) { + volumeSource, err := getVolumeSource(spec) + if err != nil { + return nil, err + } + disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host) + + return &azureDiskMounter{ + plugin: plugin, + spec: spec, + options: options, + dataDisk: disk, + }, nil +} + +func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) { + disk := 
makeDataDisk(volName, podUID, "", plugin.host) + + return &azureDiskUnmounter{ + plugin: plugin, + dataDisk: disk, + }, nil +} + +func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) { + mounter := plugin.host.GetMounter() + pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName()) + sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir) + + if err != nil { + return nil, err + } + + azureVolume := &v1.Volume{ + Name: volumeName, VolumeSource: v1.VolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: sourceName, + DataDiskURI: sourceName, }, }, } - return volume.NewSpecFromVolume(azVolume), nil + return volume.NewSpecFromVolume(azureVolume), nil } func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) { - mounter := plugin.host.GetMounter() - return mount.GetMountRefs(mounter, deviceMountPath) -} - -type azureDisk struct { - volName string - podUID types.UID - diskName string - diskUri string - cachingMode v1.AzureDataDiskCachingMode - mounter mount.Interface - plugin *azureDataDiskPlugin - volume.MetricsNil -} - -type azureDiskMounter struct { - *azureDisk - // Filesystem type, optional. - fsType string - // Specifies whether the disk will be attached as read-only. - readOnly bool - // diskMounter provides the interface that is used to mount the actual block device. - diskMounter *mount.SafeFormatAndMount -} - -var _ volume.Mounter = &azureDiskMounter{} - -func (b *azureDiskMounter) GetAttributes() volume.Attributes { - return volume.Attributes{ - ReadOnly: b.readOnly, - Managed: !b.readOnly, - SupportsSELinux: true, - } -} - -// Checks prior to mount operations to verify that the required components (binaries, etc.) -// to mount the volume are available on the underlying node. -// If not, it returns an error -func (b *azureDiskMounter) CanMount() error { - return nil -} - -// SetUp attaches the disk and bind mounts to the volume path. -func (b *azureDiskMounter) SetUp(fsGroup *int64) error { - return b.SetUpAt(b.GetPath(), fsGroup) -} - -// SetUpAt attaches the disk and bind mounts to the volume path. -func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { - b.plugin.volumeLocks.LockKey(b.diskName) - defer b.plugin.volumeLocks.UnlockKey(b.diskName) - - // TODO: handle failed mounts here. - notMnt, err := b.mounter.IsLikelyNotMountPoint(dir) - glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err) - if err != nil && !os.IsNotExist(err) { - glog.Errorf("IsLikelyNotMountPoint failed: %v", err) - return err - } - if !notMnt { - glog.V(4).Infof("%s is a mount point", dir) - return nil - } - - globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName) - - if err := os.MkdirAll(dir, 0750); err != nil { - glog.V(4).Infof("Could not create directory %s: %v", dir, err) - return err - } - - // Perform a bind mount to the full path to allow duplicate mounts of the same PD. 
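The removed SetUpAt continues below with the bind mount itself; the same mkdir, bind-mount, clean-up-on-failure sequence survives in the new azure_mounter.go. A minimal sketch of that flow, shelling out to mount(8) instead of the kubelet's mount.Interface (the paths are illustrative and actually running it requires root):

package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// bindMount sketches the SetUpAt flow: make the pod dir, bind-mount the
// globally mounted disk into it, and remove the dir if the mount fails.
func bindMount(globalPDPath, podDir string, readOnly bool) error {
	if err := os.MkdirAll(podDir, 0750); err != nil {
		return err
	}
	options := []string{"bind"}
	if readOnly {
		options = append(options, "ro")
	}
	out, err := exec.Command("mount", "-o", strings.Join(options, ","), globalPDPath, podDir).CombinedOutput()
	if err != nil {
		os.Remove(podDir) // best-effort cleanup, mirroring the plugin
		return fmt.Errorf("bind mount failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	// illustrative paths; the real plugin derives them from the volume spec
	err := bindMount("/var/lib/kubelet/plugins/kubernetes.io/azure-disk/mounts/b1a2b3c4",
		"/var/lib/kubelet/pods/poduid/volumes/vol1", false)
	fmt.Println(err)
}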
- options := []string{"bind"} - if b.readOnly { - options = append(options, "ro") - } - err = b.mounter.Mount(globalPDPath, dir, "", options) - if err != nil { - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if !notMnt { - if mntErr = b.mounter.Unmount(dir); mntErr != nil { - glog.Errorf("Failed to unmount: %v", mntErr) - return err - } - notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if !notMnt { - // This is very odd, we don't expect it. We'll try again next sync loop. - glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir) - return err - } - } - os.Remove(dir) - return err - } - - if !b.readOnly { - volume.SetVolumeOwnership(b, fsGroup) - } - glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir) - return nil -} - -func makeGlobalPDPath(host volume.VolumeHost, volume string) string { - return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume) -} - -func (azure *azureDisk) GetPath() string { - name := azureDataDiskPluginName - return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName) -} - -type azureDiskUnmounter struct { - *azureDisk -} - -var _ volume.Unmounter = &azureDiskUnmounter{} - -// Unmounts the bind mount, and detaches the disk only if the PD -// resource was the last reference to that disk on the kubelet. -func (c *azureDiskUnmounter) TearDown() error { - return c.TearDownAt(c.GetPath()) -} - -// Unmounts the bind mount, and detaches the disk only if the PD -// resource was the last reference to that disk on the kubelet. 
-func (c *azureDiskUnmounter) TearDownAt(dir string) error { - if pathExists, pathErr := util.PathExists(dir); pathErr != nil { - return fmt.Errorf("Error checking if path exists: %v", pathErr) - } else if !pathExists { - glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) - return nil - } - - notMnt, err := c.mounter.IsLikelyNotMountPoint(dir) - if err != nil { - glog.Errorf("Error checking if mountpoint %s: %v", dir, err) - return err - } - if notMnt { - glog.V(2).Info("Not mountpoint, deleting") - return os.Remove(dir) - } - // lock the volume (and thus wait for any concurrrent SetUpAt to finish) - c.plugin.volumeLocks.LockKey(c.diskName) - defer c.plugin.volumeLocks.UnlockKey(c.diskName) - refs, err := mount.GetMountRefs(c.mounter, dir) - if err != nil { - glog.Errorf("Error getting mountrefs for %s: %v", dir, err) - return err - } - if len(refs) == 0 { - glog.Errorf("Did not find pod-mount for %s during tear down", dir) - return fmt.Errorf("%s is not mounted", dir) - } - c.diskName = path.Base(refs[0]) - glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir) - - // Unmount the bind-mount inside this pod - if err := c.mounter.Unmount(dir); err != nil { - glog.Errorf("Error unmounting dir %s %v", dir, err) - return err - } - notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir) - if mntErr != nil { - glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr) - return err - } - if notMnt { - if err := os.Remove(dir); err != nil { - glog.Errorf("Error removing mountpoint %s %v", dir, err) - return err - } - } - return nil -} - -func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) { - if spec.Volume != nil && spec.Volume.AzureDisk != nil { - return spec.Volume.AzureDisk, nil - } - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil { - return spec.PersistentVolume.Spec.AzureDisk, nil - } - - return nil, fmt.Errorf("Spec does not reference an Azure disk volume type") -} - -// Return cloud provider -func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) { - azureCloudProvider, ok := cloudProvider.(*azure.Cloud) - if !ok || azureCloudProvider == nil { - return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider) - } - - return azureCloudProvider, nil + m := plugin.host.GetMounter() + return mount.GetMountRefs(m, deviceMountPath) } diff --git a/pkg/volume/azure_dd/azure_dd_test.go b/pkg/volume/azure_dd/azure_dd_test.go index 4397347f126..59becdeeada 100644 --- a/pkg/volume/azure_dd/azure_dd_test.go +++ b/pkg/volume/azure_dd/azure_dd_test.go @@ -17,17 +17,11 @@ limitations under the License. 
package azure_dd import ( - "fmt" "os" - "path" "testing" - "github.com/Azure/azure-sdk-for-go/arm/compute" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" utiltesting "k8s.io/client-go/util/testing" - "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" ) @@ -57,121 +51,5 @@ func TestCanSupport(t *testing.T) { } } -const ( - fakeDiskName = "foo" - fakeDiskUri = "https://azure/vhds/bar.vhd" - fakeLun = 2 -) - -type fakeAzureProvider struct { -} - -func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error { - if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun { - return fmt.Errorf("wrong disk") - } - return nil - -} - -func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error { - if diskName != fakeDiskName || diskUri != fakeDiskUri { - return fmt.Errorf("wrong disk") - } - return nil -} -func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) { - return int32(fakeLun), nil -} - -func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) { - return fakeLun, nil -} -func (fake *fakeAzureProvider) InstanceID(name string) (string, error) { - return "localhost", nil -} - -func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) { - return "", "", 0, fmt.Errorf("not implemented") -} - -func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error { - return fmt.Errorf("not implemented") -} - -func TestPlugin(t *testing.T) { - tmpDir, err := utiltesting.MkTmpdir("azure_ddTest") - if err != nil { - t.Fatalf("can't make a temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - plugMgr := volume.VolumePluginMgr{} - plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil)) - - plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName) - if err != nil { - t.Errorf("Can't find the plugin by name") - } - fs := "ext4" - ro := false - caching := v1.AzureDataDiskCachingNone - spec := &v1.Volume{ - Name: "vol1", - VolumeSource: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: fakeDiskName, - DataDiskURI: fakeDiskUri, - FSType: &fs, - CachingMode: &caching, - ReadOnly: &ro, - }, - }, - } - mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}) - if err != nil { - t.Errorf("Failed to make a new Mounter: %v", err) - } - if mounter == nil { - t.Errorf("Got a nil Mounter") - } - volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1") - path := mounter.GetPath() - if path != volPath { - t.Errorf("Got unexpected path: %s, should be %s", path, volPath) - } - - if err := mounter.SetUp(nil); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", path) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - t.Errorf("SetUp() failed, volume path not created: %s", path) - } else { - t.Errorf("SetUp() failed: %v", err) - } - } - - unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) - if err != nil { - t.Errorf("Failed to make a new Unmounter: %v", err) - } - if 
unmounter == nil { - t.Errorf("Got a nil Unmounter") - } - - if err := unmounter.TearDown(); err != nil { - t.Errorf("Expected success, got: %v", err) - } - if _, err := os.Stat(path); err == nil { - t.Errorf("TearDown() failed, volume path still exists: %s", path) - } else if !os.IsNotExist(err) { - t.Errorf("SetUp() failed: %v", err) - } -} +// The fakeAzureProvider type was removed because none of its functions were used. +// Testing mounting would require path calculation, which depends on the cloud provider, which is faked in the test above. diff --git a/pkg/volume/azure_dd/azure_mounter.go b/pkg/volume/azure_dd/azure_mounter.go new file mode 100644 index 00000000000..eedb5535f75 --- /dev/null +++ b/pkg/volume/azure_dd/azure_mounter.go @@ -0,0 +1,184 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure_dd + +import ( + "fmt" + "os" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + "k8s.io/kubernetes/pkg/volume" + "k8s.io/kubernetes/pkg/volume/util" +) + +type azureDiskMounter struct { + *dataDisk + spec *volume.Spec + plugin *azureDataDiskPlugin + options volume.VolumeOptions +} + +type azureDiskUnmounter struct { + *dataDisk + plugin *azureDataDiskPlugin +} + +var _ volume.Unmounter = &azureDiskUnmounter{} +var _ volume.Mounter = &azureDiskMounter{} + +func (m *azureDiskMounter) GetAttributes() volume.Attributes { + volumeSource, _ := getVolumeSource(m.spec) + return volume.Attributes{ + ReadOnly: *volumeSource.ReadOnly, + Managed: !*volumeSource.ReadOnly, + SupportsSELinux: true, + } +} + +func (m *azureDiskMounter) CanMount() error { + return nil +} + +func (m *azureDiskMounter) SetUp(fsGroup *int64) error { + return m.SetUpAt(m.GetPath(), fsGroup) +} + +func (m *azureDiskMounter) GetPath() string { + return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host) +} + +func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error { + mounter := m.plugin.host.GetMounter() + volumeSource, err := getVolumeSource(m.spec) + + if err != nil { + glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name()) + return err + } + + diskName := volumeSource.DiskName + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + + if err != nil && !os.IsNotExist(err) { + glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err) + return err + } + if !mountPoint { + return fmt.Errorf("azureDisk - %s is already a mount point for disk %s", dir, diskName) + } + + if err := os.MkdirAll(dir, 0750); err != nil { + glog.Infof("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err) + return err + } + + options := []string{"bind"} + + if *volumeSource.ReadOnly { + options = append(options, "ro") + } + + glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir) + isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk) + globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk) + + if err != nil { + return err + } + + mountErr 
:= mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options) + // Everything in the following control flow is meant as an + // attempt to clean up a failed SetUpAt (bind mount) + if mountErr != nil { + glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr) + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr) + } + + if !mountPoint { + if err = mounter.Unmount(dir); err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr) + } + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr) + } + if !mountPoint { + // not cool. leave for next sync loop. + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr) + } + } + + if err = os.Remove(dir); err != nil { + return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr) + } + + glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v; post-failure cleanup was completed", diskName, dir, mountErr) + return mountErr + } + + if !*volumeSource.ReadOnly { + volume.SetVolumeOwnership(m, fsGroup) + } + + glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir) + return nil +} + +func (u *azureDiskUnmounter) TearDown() error { + return u.TearDownAt(u.GetPath()) +} + +func (u *azureDiskUnmounter) TearDownAt(dir string) error { + if pathExists, pathErr := util.PathExists(dir); pathErr != nil { + return fmt.Errorf("Error checking if path exists: %v", pathErr) + } else if !pathExists { + glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir) + return nil + } + + glog.V(4).Infof("azureDisk - TearDownAt: %s", dir) + mounter := u.plugin.host.GetMounter() + mountPoint, err := mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err) + } + if mountPoint { + if err := os.Remove(dir); err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err) + } + // nothing was mounted here and the directory has been removed, so we are done + return nil + } + if err := mounter.Unmount(dir); err != nil { + return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err) + } + mountPoint, err = mounter.IsLikelyNotMountPoint(dir) + if err != nil { + return fmt.Errorf("azureDisk - TearDownAt:IsLikelyNotMountPoint check failed: %v", err) + } + + if mountPoint { + return os.Remove(dir) + } + + return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir") +} + +func (u *azureDiskUnmounter) GetPath() string { + return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host) +} diff --git a/pkg/volume/azure_dd/azure_provision.go b/pkg/volume/azure_dd/azure_provision.go index 67d620ae928..e47da2402e8 100644 --- a/pkg/volume/azure_dd/azure_provision.go +++ 
b/pkg/volume/azure_dd/azure_provision.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,147 +20,182 @@ import ( "fmt" "strings" - "github.com/golang/glog" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilstrings "k8s.io/kubernetes/pkg/util/strings" "k8s.io/kubernetes/pkg/volume" - "k8s.io/kubernetes/pkg/volume/util/volumehelper" ) -var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{} -var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{} +type azureDiskProvisioner struct { + plugin *azureDataDiskPlugin + options volume.VolumeOptions +} type azureDiskDeleter struct { - *azureDisk - azureProvider azureCloudProvider -} - -func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - - return plugin.newDeleterInternal(spec, azure) -} - -func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) { - if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil { - return nil, fmt.Errorf("invalid PV spec") - } - diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName - diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI - return &azureDiskDeleter{ - azureDisk: &azureDisk{ - volName: spec.Name(), - diskName: diskName, - diskUri: diskUri, - plugin: plugin, - }, - azureProvider: azure, - }, nil -} - -func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) { - azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider()) - if err != nil { - glog.V(4).Infof("failed to get azure provider") - return nil, err - } - if len(options.PVC.Spec.AccessModes) == 0 { - options.PVC.Spec.AccessModes = plugin.GetAccessModes() - } - return plugin.newProvisionerInternal(options, azure) -} - -func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) { - return &azureDiskProvisioner{ - azureDisk: &azureDisk{ - plugin: plugin, - }, - azureProvider: azure, - options: options, - }, nil -} - -var _ volume.Deleter = &azureDiskDeleter{} - -func (d *azureDiskDeleter) GetPath() string { - name := azureDataDiskPluginName - return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName) -} - -func (d *azureDiskDeleter) Delete() error { - glog.V(4).Infof("deleting volume %s", d.diskUri) - return d.azureProvider.DeleteVolume(d.diskName, d.diskUri) -} - -type azureDiskProvisioner struct { - *azureDisk - azureProvider azureCloudProvider - options volume.VolumeOptions + *dataDisk + spec *volume.Spec + plugin *azureDataDiskPlugin } var _ volume.Provisioner = &azureDiskProvisioner{} +var _ volume.Deleter = &azureDiskDeleter{} -func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { - if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) { - return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes()) +func (d *azureDiskDeleter) GetPath() string { + return getPath(d.podUID, 
d.dataDisk.diskName, d.plugin.host) +} + +func (d *azureDiskDeleter) Delete() error { + volumeSource, err := getVolumeSource(d.spec) + if err != nil { + return err } - var sku, location, account string + diskController, err := getDiskController(d.plugin.host) + if err != nil { + return err + } + wasStandAlone := (*volumeSource.Kind != v1.AzureSharedBlobDisk) + managed := (*volumeSource.Kind == v1.AzureManagedDisk) + + if managed { + return diskController.DeleteManagedDisk(volumeSource.DataDiskURI) + } + + return diskController.DeleteBlobDisk(volumeSource.DataDiskURI, wasStandAlone) +} + +func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) { + if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) { + return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes()) + } + supportedModes := p.plugin.GetAccessModes() + + // perform static validation first + if p.options.PVC.Spec.Selector != nil { + return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") + } + + if len(p.options.PVC.Spec.AccessModes) > 1 { + return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported by the AzureDisk plugin") + } + + if len(p.options.PVC.Spec.AccessModes) == 1 { + if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] { + return nil, fmt.Errorf("AzureDisk - mode %s is not supported by the AzureDisk plugin; the supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes) + } + } + + var ( + location, account string + storageAccountType, fsType string + cachingMode v1.AzureDataDiskCachingMode + strKind string + err error + ) // maxLength = 79 - (4 for ".vhd") = 75 - name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75) - capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] + name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75) + capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)] requestBytes := capacity.Value() requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024)) - // Apply ProvisionerParameters (case-insensitive). We leave validation of - // the values to the cloud provider.
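Provision matches StorageClass parameter keys case-insensitively, as the loop in the next hunk shows. A compact runnable sketch of the pattern, with parseParams an invented helper covering a subset of the keys:

package main

import (
	"fmt"
	"strings"
)

// parseParams mirrors the shape of Provision's parameter loop: keys are
// lower-cased before matching, and unknown keys are rejected outright.
func parseParams(params map[string]string) (sku, location, account string, err error) {
	for k, v := range params {
		switch strings.ToLower(k) {
		case "skuname", "storageaccounttype":
			sku = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		default:
			return "", "", "", fmt.Errorf("invalid option %q in storage class", k)
		}
	}
	return sku, location, account, nil
}

func main() {
	// mixed-case keys resolve the same way kubectl-applied StorageClasses do
	fmt.Println(parseParams(map[string]string{"skuName": "Standard_LRS", "Location": "westus"}))
}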
- for k, v := range a.options.Parameters { + for k, v := range p.options.Parameters { switch strings.ToLower(k) { case "skuname": - sku = v + storageAccountType = v case "location": location = v case "storageaccount": account = v + case "storageaccounttype": + storageAccountType = v + case "kind": + strKind = v + case "cachingmode": + cachingMode = v1.AzureDataDiskCachingMode(v) + case "fstype": + fsType = strings.ToLower(v) default: - return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName()) + return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k) } } - // TODO: implement c.options.ProvisionerSelector parsing - if a.options.PVC.Spec.Selector != nil { - return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk") - } - diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB) + // normalize values + fsType = normalizeFsType(fsType) + skuName, err := normalizeStorageAccountType(storageAccountType) if err != nil { return nil, err } + kind, err := normalizeKind(strFirstLetterToUpper(strKind)) + if err != nil { + return nil, err + } + + if cachingMode, err = normalizeCachingMode(cachingMode); err != nil { + return nil, err + } + + diskController, err := getDiskController(p.plugin.host) + if err != nil { + return nil, err + } + + // create disk + diskURI := "" + if kind == v1.AzureManagedDisk { + diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags)) + if err != nil { + return nil, err + } + } else { + forceStandAlone := (kind == v1.AzureDedicatedBlobDisk) + if kind == v1.AzureDedicatedBlobDisk { + if location != "" && account != "" { + // use dedicated kind (by default) for compatibility + _, diskURI, _, err = diskController.CreateVolume(name, account, skuName, location, requestGB) + if err != nil { + return nil, err + } + } else { + if location != "" || account != "" { + return nil, fmt.Errorf("AzureDisk - location(%s) and account(%s) must be both empty or both specified for dedicated kind; specifying only one of them is not allowed", + location, account) + } + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + if err != nil { + return nil, err + } + } + } else { + diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone) + if err != nil { + return nil, err + } + } + } + pv := &v1.PersistentVolume{ ObjectMeta: metav1.ObjectMeta{ - Name: a.options.PVName, + Name: p.options.PVName, Labels: map[string]string{}, Annotations: map[string]string{ - volumehelper.VolumeDynamicallyCreatedByKey: "azure-disk-dynamic-provisioner", + // "kubernetes.io/createdby" is the value of volumehelper.VolumeDynamicallyCreatedByKey; the constant is no longer imported here + "kubernetes.io/createdby": "azure-disk-dynamic-provisioner", }, }, Spec: v1.PersistentVolumeSpec{ - PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy, - AccessModes: a.options.PVC.Spec.AccessModes, + PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy, + AccessModes: supportedModes, Capacity: v1.ResourceList{ - v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)), }, PersistentVolumeSource: v1.PersistentVolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: diskName, - DataDiskURI: diskUri, + CachingMode: &cachingMode, + DiskName: name, + DataDiskURI: diskURI, + Kind: &kind, + FSType: &fsType, }, }, }, diff --git a/pkg/volume/azure_dd/vhd_util.go 
b/pkg/volume/azure_dd/vhd_util.go deleted file mode 100644 index 8db5093b76f..00000000000 --- a/pkg/volume/azure_dd/vhd_util.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package azure_dd - -import ( - "io/ioutil" - "os" - "path" - "regexp" - "strconv" - "strings" - - "github.com/golang/glog" - "k8s.io/kubernetes/pkg/util/exec" -) - -type ioHandler interface { - ReadDir(dirname string) ([]os.FileInfo, error) - WriteFile(filename string, data []byte, perm os.FileMode) error - Readlink(name string) (string, error) -} - -type osIOHandler struct{} - -func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) { - return ioutil.ReadDir(dirname) -} -func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error { - return ioutil.WriteFile(filename, data, perm) -} -func (handler *osIOHandler) Readlink(name string) (string, error) { - return os.Readlink(name) -} - -// exclude those used by azure as resource and OS root in /dev/disk/azure -func listAzureDiskPath(io ioHandler) []string { - azureDiskPath := "/dev/disk/azure/" - var azureDiskList []string - if dirs, err := io.ReadDir(azureDiskPath); err == nil { - for _, f := range dirs { - name := f.Name() - diskPath := azureDiskPath + name - if link, linkErr := io.Readlink(diskPath); linkErr == nil { - sd := link[(strings.LastIndex(link, "/") + 1):] - azureDiskList = append(azureDiskList, sd) - } - } - } - glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList) - return azureDiskList -} - -// given a LUN find the VHD device path like /dev/sdd -// exclude those disks used by Azure resources and OS root -func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) { - azureDisks := listAzureDiskPath(io) - return findDiskByLunWithConstraint(lun, io, exe, azureDisks) -} - -// look for device /dev/sdX and validate it is a VHD -// return empty string if no disk is found -func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) { - var err error - sys_path := "/sys/bus/scsi/devices" - if dirs, err := io.ReadDir(sys_path); err == nil { - for _, f := range dirs { - name := f.Name() - // look for path like /sys/bus/scsi/devices/3:0:0:1 - arr := strings.Split(name, ":") - if len(arr) < 4 { - continue - } - // extract LUN from the path. - // LUN is the last index of the array, i.e. 
1 in /sys/bus/scsi/devices/3:0:0:1 - l, err := strconv.Atoi(arr[3]) - if err != nil { - // unknown path format, continue to read the next one - glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err) - continue - } - if lun == l { - // find the matching LUN - // read vendor and model to ensure it is a VHD disk - vendor := path.Join(sys_path, name, "vendor") - model := path.Join(sys_path, name, "model") - out, err := exe.Command("cat", vendor, model).CombinedOutput() - if err != nil { - glog.Errorf("failed to cat device vendor and model, err: %v", err) - continue - } - matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out))) - if err != nil || !matched { - glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err) - continue - } - // find a disk, validate name - dir := path.Join(sys_path, name, "block") - if dev, err := io.ReadDir(dir); err == nil { - found := false - for _, diskName := range azureDisks { - glog.V(12).Infof("validating disk %q with sys disk %q", dev[0].Name(), diskName) - if string(dev[0].Name()) == diskName { - found = true - break - } - } - if !found { - return "/dev/" + dev[0].Name(), nil - } - } - } - } - } - return "", err -} - -// rescan scsi bus -func scsiHostRescan(io ioHandler) { - scsi_path := "/sys/class/scsi_host/" - if dirs, err := io.ReadDir(scsi_path); err == nil { - for _, f := range dirs { - name := scsi_path + f.Name() + "/scan" - data := []byte("- - -") - if err = io.WriteFile(name, data, 0666); err != nil { - glog.Errorf("failed to rescan scsi host %s", name) - } - } - } else { - glog.Errorf("failed to read %s, err %v", scsi_path, err) - } -} diff --git a/pkg/volume/azure_file/OWNERS b/pkg/volume/azure_file/OWNERS index 51f8d0a1076..5cfad1f70cd 100644 --- a/pkg/volume/azure_file/OWNERS +++ b/pkg/volume/azure_file/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/cephfs/OWNERS b/pkg/volume/cephfs/OWNERS index 510c76e9db3..bda61e5c932 100644 --- a/pkg/volume/cephfs/OWNERS +++ b/pkg/volume/cephfs/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/cinder/OWNERS b/pkg/volume/cinder/OWNERS index 96361ff64e4..b8bd178391f 100644 --- a/pkg/volume/cinder/OWNERS +++ b/pkg/volume/cinder/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/configmap/OWNERS b/pkg/volume/configmap/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/configmap/OWNERS +++ b/pkg/volume/configmap/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/downwardapi/OWNERS b/pkg/volume/downwardapi/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/downwardapi/OWNERS +++ b/pkg/volume/downwardapi/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/empty_dir/OWNERS b/pkg/volume/empty_dir/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/empty_dir/OWNERS +++ b/pkg/volume/empty_dir/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/fc/OWNERS b/pkg/volume/fc/OWNERS index 0c721272070..ad0eff1fc62 100644 --- 
a/pkg/volume/fc/OWNERS +++ b/pkg/volume/fc/OWNERS @@ -7,7 +7,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/flexvolume/OWNERS b/pkg/volume/flexvolume/OWNERS index 34c7f918d9b..7ac8bb2f613 100644 --- a/pkg/volume/flexvolume/OWNERS +++ b/pkg/volume/flexvolume/OWNERS @@ -11,7 +11,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/flocker/OWNERS b/pkg/volume/flocker/OWNERS index b0a585eec10..663ed96d5bb 100644 --- a/pkg/volume/flocker/OWNERS +++ b/pkg/volume/flocker/OWNERS @@ -9,7 +9,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/gce_pd/OWNERS b/pkg/volume/gce_pd/OWNERS index 3c1271befda..1f138a15291 100644 --- a/pkg/volume/gce_pd/OWNERS +++ b/pkg/volume/gce_pd/OWNERS @@ -4,7 +4,6 @@ approvers: reviewers: - thockin - smarterclayton -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/git_repo/OWNERS b/pkg/volume/git_repo/OWNERS index 7954c84a9a4..dba0b14ec9e 100644 --- a/pkg/volume/git_repo/OWNERS +++ b/pkg/volume/git_repo/OWNERS @@ -7,7 +7,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/glusterfs/OWNERS b/pkg/volume/glusterfs/OWNERS index 4271b66de44..89a204b4b90 100644 --- a/pkg/volume/glusterfs/OWNERS +++ b/pkg/volume/glusterfs/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/host_path/OWNERS b/pkg/volume/host_path/OWNERS index d99a7a678bc..3b57899265f 100644 --- a/pkg/volume/host_path/OWNERS +++ b/pkg/volume/host_path/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/iscsi/OWNERS b/pkg/volume/iscsi/OWNERS index 242946cda63..0d7185f0b45 100644 --- a/pkg/volume/iscsi/OWNERS +++ b/pkg/volume/iscsi/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/nfs/OWNERS b/pkg/volume/nfs/OWNERS index ae747e33483..e5cd1fb42df 100644 --- a/pkg/volume/nfs/OWNERS +++ b/pkg/volume/nfs/OWNERS @@ -9,7 +9,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/quobyte/OWNERS b/pkg/volume/quobyte/OWNERS index 025093af3df..38c978a79d6 100644 --- a/pkg/volume/quobyte/OWNERS +++ b/pkg/volume/quobyte/OWNERS @@ -8,7 +8,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/rbd/OWNERS b/pkg/volume/rbd/OWNERS index 51fdf5e5c0f..f818be5f646 100644 --- a/pkg/volume/rbd/OWNERS +++ b/pkg/volume/rbd/OWNERS @@ -7,7 +7,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/secret/OWNERS b/pkg/volume/secret/OWNERS index b8f17f6f38f..54ffe3c5748 100644 --- a/pkg/volume/secret/OWNERS +++ b/pkg/volume/secret/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/pkg/volume/testing/OWNERS b/pkg/volume/testing/OWNERS index 30b3955dd65..f0e4b2d5614 100755 --- a/pkg/volume/testing/OWNERS +++ b/pkg/volume/testing/OWNERS @@ -10,7 +10,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - 
justinsb diff --git a/pkg/volume/vsphere_volume/OWNERS b/pkg/volume/vsphere_volume/OWNERS index f6808465ec8..23d64e4b0ef 100755 --- a/pkg/volume/vsphere_volume/OWNERS +++ b/pkg/volume/vsphere_volume/OWNERS @@ -12,7 +12,6 @@ reviewers: - deads2k - brendandburns - derekwaynecarr -- bprashanth - pmorie - saad-ali - justinsb diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS b/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS index ebaaa56c814..00811f8ffb1 100755 --- a/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS +++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - davidopp - saad-ali diff --git a/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS b/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS index a2e4cf922d4..5769342c223 100755 --- a/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS +++ b/staging/src/k8s.io/apiserver/pkg/registry/rest/OWNERS @@ -9,7 +9,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - kargakis - justinsb diff --git a/staging/src/k8s.io/client-go/OWNERS b/staging/src/k8s.io/client-go/OWNERS index c995c3aae0d..c3606d0a256 100644 --- a/staging/src/k8s.io/client-go/OWNERS +++ b/staging/src/k8s.io/client-go/OWNERS @@ -18,7 +18,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - gmarek - erictune - davidopp diff --git a/staging/src/k8s.io/client-go/kubernetes/BUILD b/staging/src/k8s.io/client-go/kubernetes/BUILD index 089edd660fb..44252b1b176 100644 --- a/staging/src/k8s.io/client-go/kubernetes/BUILD +++ b/staging/src/k8s.io/client-go/kubernetes/BUILD @@ -12,6 +12,7 @@ go_library( srcs = [ "clientset.go", "doc.go", + "import.go", ], tags = ["automanaged"], deps = [ diff --git a/staging/src/k8s.io/client-go/kubernetes/import.go b/staging/src/k8s.io/client-go/kubernetes/import.go new file mode 100644 index 00000000000..c4f9a91bcfb --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/import.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file exists to enforce this clientset's vanity import path. 
+ +package kubernetes // import "k8s.io/client-go/kubernetes" diff --git a/staging/src/k8s.io/client-go/tools/cache/OWNERS b/staging/src/k8s.io/client-go/tools/cache/OWNERS index e923c77092f..78e66f16f9d 100755 --- a/staging/src/k8s.io/client-go/tools/cache/OWNERS +++ b/staging/src/k8s.io/client-go/tools/cache/OWNERS @@ -10,7 +10,6 @@ reviewers: - mikedanese - liggitt - nikhiljindal -- bprashanth - erictune - davidopp - pmorie diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go index 3ccb79ae18f..cb196695065 100644 --- a/test/e2e/framework/pv_util.go +++ b/test/e2e/framework/pv_util.go @@ -716,16 +716,21 @@ func createPD(zone string) (string, error) { } else if TestContext.Provider == "azure" { pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID())) azureCloud, err := GetAzureCloud() + if err != nil { return "", err } - _, diskUri, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */) + if azureCloud.BlobDiskController == nil { + return "", fmt.Errorf("BlobDiskController is nil, which is not expected") + } + + diskUri, err := azureCloud.BlobDiskController.CreateBlobDisk(pdName, "standard_lrs", 1, false) if err != nil { return "", err } + return diskUri, nil - } else { return "", fmt.Errorf("provider does not support volume creation") } @@ -770,8 +775,11 @@ func deletePD(pdName string) error { if err != nil { return err } + if azureCloud.BlobDiskController == nil { + return fmt.Errorf("BlobDiskController is nil, which is not expected") + } diskName := pdName[(strings.LastIndex(pdName, "/") + 1):] - err = azureCloud.DeleteVolume(diskName, pdName) + err = azureCloud.BlobDiskController.DeleteBlobDisk(diskName, false) if err != nil { Logf("failed to delete Azure volume %q: %v", pdName, err) return err
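For context, a sketch of how a test could pair the two helpers above so a provisioned blob disk is always cleaned up. withAzureBlobDisk is a hypothetical name that would live alongside createPD and deletePD in the framework package; it is not part of this change:

// withAzureBlobDisk provisions a 1 GiB standard blob disk, runs fn
// against its URI, and deletes the disk afterwards. Illustrative only.
func withAzureBlobDisk(fn func(diskURI string) error) error {
	diskURI, err := createPD("" /* zone is unused on Azure */)
	if err != nil {
		return err
	}
	defer func() {
		if delErr := deletePD(diskURI); delErr != nil {
			Logf("failed to clean up Azure disk %q: %v", diskURI, delErr)
		}
	}()
	return fn(diskURI)
}

diff --git a/test/integration/apiserver/BUILD b/test/integration/apiserver/BUILD index 68c943269fc..693a93051e6 100644 --- a/test/integration/apiserver/BUILD +++ b/test/integration/apiserver/BUILD @@ -9,8 +9,10 @@ load( go_test( name = "go_default_test", + size = "large", srcs = [ "apiserver_test.go", + "main_test.go", "patch_test.go", ], tags = [ diff --git a/pkg/cloudprovider/providers/azure/vhd.go b/test/integration/apiserver/main_test.go similarity index 57% rename from pkg/cloudprovider/providers/azure/vhd.go rename to test/integration/apiserver/main_test.go index 93c857743b0..268a3588398 100644 --- a/pkg/cloudprovider/providers/azure/vhd.go +++ b/test/integration/apiserver/main_test.go @@ -1,5 +1,5 @@ /* -Copyright 2016 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,25 +14,14 @@ See the License for the specific language governing permissions and limitations under the License.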
*/ -package azure +package apiserver import ( - "bytes" - "encoding/binary" + "testing" - "github.com/rubiojr/go-vhd/vhd" + "k8s.io/kubernetes/test/integration/framework" ) -const ( - vhdHeaderSize = vhd.VHD_HEADER_SIZE -) - -func createVHDHeader(size uint64) ([]byte, error) { - h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{}) - b := new(bytes.Buffer) - err := binary.Write(b, binary.BigEndian, h) - if err != nil { - return nil, err - } - return b.Bytes(), nil +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) } diff --git a/test/integration/auth/BUILD b/test/integration/auth/BUILD index 0cf0e153b4f..685b30d10a8 100644 --- a/test/integration/auth/BUILD +++ b/test/integration/auth/BUILD @@ -9,9 +9,11 @@ load( go_test( name = "go_default_test", + size = "large", srcs = [ "accessreview_test.go", "auth_test.go", + "main_test.go", "node_test.go", "rbac_test.go", ], diff --git a/test/integration/auth/main_test.go b/test/integration/auth/main_test.go new file mode 100644 index 00000000000..4c173c3f174 --- /dev/null +++ b/test/integration/auth/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/client/BUILD b/test/integration/client/BUILD index 179b4b059ca..0a67fe0dc21 100644 --- a/test/integration/client/BUILD +++ b/test/integration/client/BUILD @@ -9,9 +9,11 @@ load( go_test( name = "go_default_test", + size = "large", srcs = [ "client_test.go", "dynamic_client_test.go", + "main_test.go", ], tags = [ "automanaged", diff --git a/test/integration/client/main_test.go b/test/integration/client/main_test.go new file mode 100644 index 00000000000..df04d8ee0c3 --- /dev/null +++ b/test/integration/client/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package client + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/configmap/BUILD b/test/integration/configmap/BUILD index 14bc055261b..30b08ad8f41 100644 --- a/test/integration/configmap/BUILD +++ b/test/integration/configmap/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["configmap_test.go"], + size = "large", + srcs = [ + "configmap_test.go", + "main_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/configmap/main_test.go b/test/integration/configmap/main_test.go new file mode 100644 index 00000000000..c05a94633b4 --- /dev/null +++ b/test/integration/configmap/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package configmap + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/defaulttolerationseconds/BUILD b/test/integration/defaulttolerationseconds/BUILD index 0b150dbafa4..f449c7513de 100644 --- a/test/integration/defaulttolerationseconds/BUILD +++ b/test/integration/defaulttolerationseconds/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["defaulttolerationseconds_test.go"], + size = "large", + srcs = [ + "defaulttolerationseconds_test.go", + "main_test.go", + ], tags = [ "automanaged", "etcd", diff --git a/test/integration/defaulttolerationseconds/main_test.go b/test/integration/defaulttolerationseconds/main_test.go new file mode 100644 index 00000000000..c23256fc0d8 --- /dev/null +++ b/test/integration/defaulttolerationseconds/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package defaulttolerationseconds + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/deployment/BUILD b/test/integration/deployment/BUILD index 8b3514e7460..9f103241be0 100644 --- a/test/integration/deployment/BUILD +++ b/test/integration/deployment/BUILD @@ -10,7 +10,11 @@ load( go_test( name = "go_default_test", - srcs = ["deployment_test.go"], + size = "large", + srcs = [ + "deployment_test.go", + "main_test.go", + ], library = ":go_default_library", tags = ["automanaged"], deps = [ diff --git a/test/integration/deployment/main_test.go b/test/integration/deployment/main_test.go new file mode 100644 index 00000000000..769efb3a7ca --- /dev/null +++ b/test/integration/deployment/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deployment + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/etcd/BUILD b/test/integration/etcd/BUILD index 5525ca0ff23..1460f4f62b1 100644 --- a/test/integration/etcd/BUILD +++ b/test/integration/etcd/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["etcd_storage_path_test.go"], + size = "large", + srcs = [ + "etcd_storage_path_test.go", + "main_test.go", + ], tags = [ "automanaged", "etcd", diff --git a/test/integration/etcd/etcd_storage_path_test.go b/test/integration/etcd/etcd_storage_path_test.go index fd17823947d..fccf080609a 100644 --- a/test/integration/etcd/etcd_storage_path_test.go +++ b/test/integration/etcd/etcd_storage_path_test.go @@ -588,7 +588,7 @@ func startRealMasterOrDie(t *testing.T, certDir string) (*allClient, clientv3.KV kubeAPIServerOptions := options.NewServerRunOptions() kubeAPIServerOptions.SecureServing.BindAddress = net.ParseIP("127.0.0.1") kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir - kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURLFromEnv()} + kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} kubeAPIServerOptions.Etcd.DefaultStorageMediaType = runtime.ContentTypeJSON // TODO use protobuf? kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange kubeAPIServerOptions.Authorization.Mode = "RBAC" diff --git a/test/integration/etcd/main_test.go b/test/integration/etcd/main_test.go new file mode 100644 index 00000000000..029421dbe20 --- /dev/null +++ b/test/integration/etcd/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/evictions/BUILD b/test/integration/evictions/BUILD index 03f38476c4f..c6584a33060 100644 --- a/test/integration/evictions/BUILD +++ b/test/integration/evictions/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["evictions_test.go"], + size = "large", + srcs = [ + "evictions_test.go", + "main_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/evictions/main_test.go b/test/integration/evictions/main_test.go new file mode 100644 index 00000000000..5973101c020 --- /dev/null +++ b/test/integration/evictions/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package evictions + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/examples/BUILD b/test/integration/examples/BUILD index 861d2980d21..8d696172022 100644 --- a/test/integration/examples/BUILD +++ b/test/integration/examples/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["apiserver_test.go"], + size = "large", + srcs = [ + "apiserver_test.go", + "main_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/examples/apiserver_test.go b/test/integration/examples/apiserver_test.go index 2c34a30329b..671f4cc84a2 100644 --- a/test/integration/examples/apiserver_test.go +++ b/test/integration/examples/apiserver_test.go @@ -102,7 +102,7 @@ func TestAggregatedAPIServer(t *testing.T) { kubeAPIServerOptions.SecureServing.BindPort = kubePort kubeAPIServerOptions.SecureServing.ServerCert.CertDirectory = certDir kubeAPIServerOptions.InsecureServing.BindPort = 0 - kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURLFromEnv()} + kubeAPIServerOptions.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} kubeAPIServerOptions.ServiceClusterIPRange = *defaultServiceClusterIPRange kubeAPIServerOptions.Authentication.RequestHeader.UsernameHeaders = []string{"X-Remote-User"} kubeAPIServerOptions.Authentication.RequestHeader.GroupHeaders = []string{"X-Remote-Group"} @@ -190,7 +190,7 @@ func TestAggregatedAPIServer(t *testing.T) { "--requestheader-allowed-names=kube-aggregator", "--authentication-kubeconfig", kubeconfigFile.Name(), "--authorization-kubeconfig", kubeconfigFile.Name(), - "--etcd-servers", framework.GetEtcdURLFromEnv(), + "--etcd-servers", 
framework.GetEtcdURL(), "--cert-dir", wardleCertDir, }) if err := wardleCmd.Execute(); err != nil { @@ -266,7 +266,7 @@ func TestAggregatedAPIServer(t *testing.T) { "--core-kubeconfig", kubeconfigFile.Name(), "--authentication-kubeconfig", kubeconfigFile.Name(), "--authorization-kubeconfig", kubeconfigFile.Name(), - "--etcd-servers", framework.GetEtcdURLFromEnv(), + "--etcd-servers", framework.GetEtcdURL(), "--cert-dir", aggregatorCertDir, }) if err := aggregatorCmd.Execute(); err != nil { diff --git a/test/integration/examples/main_test.go b/test/integration/examples/main_test.go new file mode 100644 index 00000000000..268a3588398 --- /dev/null +++ b/test/integration/examples/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiserver + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/federation/BUILD b/test/integration/federation/BUILD index 3dbc08822b5..1bb2e870979 100644 --- a/test/integration/federation/BUILD +++ b/test/integration/federation/BUILD @@ -9,9 +9,11 @@ load( go_test( name = "go_default_test", + size = "large", srcs = [ "api_test.go", "crud_test.go", + "main_test.go", ], tags = [ "automanaged", @@ -22,6 +24,7 @@ go_test( "//federation/pkg/federatedtypes:go_default_library", "//federation/pkg/federatedtypes/crudtester:go_default_library", "//test/integration/federation/framework:go_default_library", + "//test/integration/framework:go_default_library", "//vendor/github.com/pborman/uuid:go_default_library", "//vendor/github.com/stretchr/testify/assert:go_default_library", "//vendor/k8s.io/api/autoscaling/v1:go_default_library", diff --git a/test/integration/federation/framework/api.go b/test/integration/federation/framework/api.go index 07df7f09a7e..f1d79cd56dd 100644 --- a/test/integration/federation/framework/api.go +++ b/test/integration/federation/framework/api.go @@ -36,7 +36,7 @@ const apiNoun = "federation apiserver" // GetRunOptions returns the default run options that can be used to run a test federation apiserver. func GetRunOptions() *options.ServerRunOptions { r := options.NewServerRunOptions() - r.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURLFromEnv()} + r.Etcd.StorageConfig.ServerList = []string{framework.GetEtcdURL()} // Use a unique prefix to ensure isolation from other tests using the same etcd instance r.Etcd.StorageConfig.Prefix = uuid.New() // Disable secure serving diff --git a/test/integration/federation/main_test.go b/test/integration/federation/main_test.go new file mode 100644 index 00000000000..ccce686e762 --- /dev/null +++ b/test/integration/federation/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federation + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 25b6e766239..ab99ef886d6 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -10,10 +10,14 @@ load( go_library( name = "go_default_library", srcs = [ + "etcd.go", "master_utils.go", "perf_utils.go", "serializer.go", ], + data = [ + "@com_coreos_etcd//:etcd", + ], tags = ["automanaged"], deps = [ "//pkg/api:go_default_library", diff --git a/test/integration/framework/etcd.go b/test/integration/framework/etcd.go new file mode 100644 index 00000000000..dcb4bbc4e23 --- /dev/null +++ b/test/integration/framework/etcd.go @@ -0,0 +1,109 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "fmt" + "hash/adler32" + "io" + "io/ioutil" + "math/rand" + "os" + "os/exec" + "path/filepath" + "sync" + + "k8s.io/kubernetes/pkg/util/env" + + "github.com/golang/glog" +) + +var ( + etcdSetup sync.Once + etcdURL = "" +) + +func setupETCD() { + etcdSetup.Do(func() { + if os.Getenv("RUNFILES_DIR") == "" { + // RUNFILES_DIR is unset outside Bazel, so fall back to an externally managed etcd instance + etcdURL = env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379") + return + } + etcdPath := filepath.Join(os.Getenv("RUNFILES_DIR"), "com_coreos_etcd/etcd") + // derive a stable pseudo-random port from the test binary's path so each binary gets the same port on every run + etcdPort := 20000 + rand.New(rand.NewSource(int64(adler32.Checksum([]byte(os.Args[0]))))).Intn(5000) + etcdURL = fmt.Sprintf("http://127.0.0.1:%d", etcdPort) + + info, err := os.Stat(etcdPath) + if err != nil { + glog.Fatalf("Unable to stat etcd: %v", err) + } + if info.IsDir() { + glog.Fatalf("Did not expect %q to be a directory", etcdPath) + } + + etcdDataDir, err := ioutil.TempDir(os.TempDir(), "integration_test_etcd_data") + if err != nil { + glog.Fatalf("Unable to make temp etcd data dir: %v", err) + } + glog.Infof("storing etcd data in: %v", etcdDataDir) + + etcdCmd := exec.Command( + etcdPath, + "--data-dir", + etcdDataDir, + "--listen-client-urls", + GetEtcdURL(), + "--advertise-client-urls", + GetEtcdURL(), + "--listen-peer-urls", + "http://127.0.0.1:0", + ) + + stdout, err := etcdCmd.StdoutPipe() + if err != nil { + glog.Fatalf("Failed to run etcd: %v", err) + } + stderr, err := etcdCmd.StderrPipe() + if err != nil { + glog.Fatalf("Failed to run etcd: %v", err) + } + if err := etcdCmd.Start(); err != nil { + glog.Fatalf("Failed to run etcd: %v", err) + } + + go io.Copy(os.Stdout, stdout) + go io.Copy(os.Stderr, stderr) + + go func() { + if err := etcdCmd.Wait(); err != nil { + glog.Fatalf("Failed to run etcd: %v", err) + } + glog.Fatalf("etcd should not have exited") + }() + }) +} + +// EtcdMain ensures an etcd instance is available, runs the tests, and exits with their status. +func EtcdMain(tests func() int) { + setupETCD() + os.Exit(tests()) +} + +// GetEtcdURL returns the URL of the etcd instance shared by integration tests. +func GetEtcdURL() string { + return etcdURL +}
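A note on the port derivation in setupETCD above: seeding math/rand with the Adler-32 checksum of the test binary's path makes the port stable per binary across runs, while different test binaries running in parallel under Bazel usually land on different ports in [20000, 25000). A standalone sketch of the same computation:

package main

import (
	"fmt"
	"hash/adler32"
	"math/rand"
	"os"
)

func main() {
	// same derivation as setupETCD: stable per binary path,
	// spread across a window of 5000 ports above 20000
	seed := int64(adler32.Checksum([]byte(os.Args[0])))
	port := 20000 + rand.New(rand.NewSource(seed)).Intn(5000)
	fmt.Printf("etcd for %s would listen on http://127.0.0.1:%d\n", os.Args[0], port)
}

The scheme avoids collisions only probabilistically; the data directory, by contrast, is a fresh temp dir on every run.

diff --git a/test/integration/framework/master_utils.go b/test/integration/framework/master_utils.go index 5de1d866ca7..83485d1d382 100644 --- a/test/integration/framework/master_utils.go +++ b/test/integration/framework/master_utils.go @@ -70,7 +70,6 @@ import ( "k8s.io/kubernetes/pkg/kubectl" kubeletclient "k8s.io/kubernetes/pkg/kubelet/client" "k8s.io/kubernetes/pkg/master" - "k8s.io/kubernetes/pkg/util/env" "k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/plugin/pkg/admission/admit" ) @@ -298,20 +297,13 @@ func parseCIDROrDie(cidr string) *net.IPNet { return parsed } -// return the EtcdURL -func GetEtcdURLFromEnv() string { - url := env.GetEnvAsStringOrFallback("KUBE_INTEGRATION_ETCD_URL", "http://127.0.0.1:2379") - glog.V(4).Infof("Using KUBE_INTEGRATION_ETCD_URL=%q", url) - return url -} - // Returns a basic master config. func NewMasterConfig() *master.Config { // This causes the integration tests to exercise the etcd // prefix code, so please don't change without ensuring // sufficient coverage in other ways.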
etcdOptions := options.NewEtcdOptions(storagebackend.NewDefaultConfig(uuid.New(), api.Scheme, nil)) - etcdOptions.StorageConfig.ServerList = []string{GetEtcdURLFromEnv()} + etcdOptions.StorageConfig.ServerList = []string{GetEtcdURL()} info, _ := runtime.SerializerInfoForMediaType(api.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON) ns := NewSingleContentTypeSerializer(api.Scheme, info) diff --git a/test/integration/garbagecollector/BUILD b/test/integration/garbagecollector/BUILD index 73857b58f9b..99ad19c09b4 100644 --- a/test/integration/garbagecollector/BUILD +++ b/test/integration/garbagecollector/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["garbage_collector_test.go"], + size = "large", + srcs = [ + "garbage_collector_test.go", + "main_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/garbagecollector/main_test.go b/test/integration/garbagecollector/main_test.go new file mode 100644 index 00000000000..1e2f4b243bc --- /dev/null +++ b/test/integration/garbagecollector/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package garbagecollector + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/kubectl/BUILD b/test/integration/kubectl/BUILD index 6695c9920c7..ac2b6efb19f 100644 --- a/test/integration/kubectl/BUILD +++ b/test/integration/kubectl/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["kubectl_test.go"], + size = "large", + srcs = [ + "kubectl_test.go", + "main_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/kubectl/main_test.go b/test/integration/kubectl/main_test.go new file mode 100644 index 00000000000..9c83e844474 --- /dev/null +++ b/test/integration/kubectl/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubectl + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/master/BUILD b/test/integration/master/BUILD index 894171318b0..91bdb5e4123 100644 --- a/test/integration/master/BUILD +++ b/test/integration/master/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["master_test.go"], + size = "large", + srcs = [ + "main_test.go", + "master_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/master/main_test.go b/test/integration/master/main_test.go new file mode 100644 index 00000000000..10ce6851b8a --- /dev/null +++ b/test/integration/master/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package master + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/metrics/BUILD b/test/integration/metrics/BUILD index 66bb68e1209..054f267672b 100644 --- a/test/integration/metrics/BUILD +++ b/test/integration/metrics/BUILD @@ -29,7 +29,11 @@ filegroup( go_test( name = "go_default_test", - srcs = ["metrics_test.go"], + size = "large", + srcs = [ + "main_test.go", + "metrics_test.go", + ], library = ":go_default_library", tags = [ "automanaged", diff --git a/test/integration/metrics/main_test.go b/test/integration/metrics/main_test.go new file mode 100644 index 00000000000..051ae359ae7 --- /dev/null +++ b/test/integration/metrics/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/objectmeta/BUILD b/test/integration/objectmeta/BUILD index 03f088e7406..f7fb9fd2c75 100644 --- a/test/integration/objectmeta/BUILD +++ b/test/integration/objectmeta/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["objectmeta_test.go"], + size = "large", + srcs = [ + "main_test.go", + "objectmeta_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/objectmeta/main_test.go b/test/integration/objectmeta/main_test.go new file mode 100644 index 00000000000..ca65234eb42 --- /dev/null +++ b/test/integration/objectmeta/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectmeta + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/openshift/BUILD b/test/integration/openshift/BUILD index 0e7de0ebc21..b7e180a750e 100644 --- a/test/integration/openshift/BUILD +++ b/test/integration/openshift/BUILD @@ -9,13 +9,18 @@ load( go_test( name = "go_default_test", - srcs = ["openshift_test.go"], + size = "large", + srcs = [ + "main_test.go", + "openshift_test.go", + ], tags = [ "automanaged", "integration", ], deps = [ "//pkg/master:go_default_library", + "//test/integration/framework:go_default_library", "//vendor/k8s.io/apiserver/pkg/server:go_default_library", ], ) diff --git a/test/integration/openshift/main_test.go b/test/integration/openshift/main_test.go new file mode 100644 index 00000000000..8ff0d17e9f9 --- /dev/null +++ b/test/integration/openshift/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openshift + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/pods/BUILD b/test/integration/pods/BUILD index 242411d7159..75859677e93 100644 --- a/test/integration/pods/BUILD +++ b/test/integration/pods/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["pods_test.go"], + size = "large", + srcs = [ + "main_test.go", + "pods_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/pods/main_test.go b/test/integration/pods/main_test.go new file mode 100644 index 00000000000..337dabe3a33 --- /dev/null +++ b/test/integration/pods/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pods + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/quota/BUILD b/test/integration/quota/BUILD index 02197516d4b..56ef4137809 100644 --- a/test/integration/quota/BUILD +++ b/test/integration/quota/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["quota_test.go"], + size = "large", + srcs = [ + "main_test.go", + "quota_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/quota/main_test.go b/test/integration/quota/main_test.go new file mode 100644 index 00000000000..51110064856 --- /dev/null +++ b/test/integration/quota/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package quota + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/replicaset/BUILD b/test/integration/replicaset/BUILD index b008fc7e522..ce3109baa14 100644 --- a/test/integration/replicaset/BUILD +++ b/test/integration/replicaset/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["replicaset_test.go"], + size = "large", + srcs = [ + "main_test.go", + "replicaset_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/replicaset/main_test.go b/test/integration/replicaset/main_test.go new file mode 100644 index 00000000000..32865709e06 --- /dev/null +++ b/test/integration/replicaset/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicaset + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/replicationcontroller/BUILD b/test/integration/replicationcontroller/BUILD index 085b4188dd2..5595bfc55a4 100644 --- a/test/integration/replicationcontroller/BUILD +++ b/test/integration/replicationcontroller/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["replicationcontroller_test.go"], + size = "large", + srcs = [ + "main_test.go", + "replicationcontroller_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/replicationcontroller/main_test.go b/test/integration/replicationcontroller/main_test.go new file mode 100644 index 00000000000..2cbb1c4ae58 --- /dev/null +++ b/test/integration/replicationcontroller/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package replicationcontroller + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index d851b8d966b..a0753d612d2 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -9,8 +9,10 @@ load( go_test( name = "go_default_test", + size = "large", srcs = [ "extender_test.go", + "main_test.go", "scheduler_test.go", ], tags = [ diff --git a/test/integration/scheduler/main_test.go b/test/integration/scheduler/main_test.go new file mode 100644 index 00000000000..468c04b4d35 --- /dev/null +++ b/test/integration/scheduler/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scheduler + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD index 1cebff346db..5f13ba92903 100644 --- a/test/integration/scheduler_perf/BUILD +++ b/test/integration/scheduler_perf/BUILD @@ -35,7 +35,9 @@ go_library( go_test( name = "go_default_test", + size = "large", srcs = [ + "main_test.go", "scheduler_bench_test.go", "scheduler_test.go", ], diff --git a/test/integration/scheduler_perf/main_test.go b/test/integration/scheduler_perf/main_test.go new file mode 100644 index 00000000000..16275396eba --- /dev/null +++ b/test/integration/scheduler_perf/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package benchmark + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/secrets/BUILD b/test/integration/secrets/BUILD index c1e003cef05..b11c608b889 100644 --- a/test/integration/secrets/BUILD +++ b/test/integration/secrets/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["secrets_test.go"], + size = "large", + srcs = [ + "main_test.go", + "secrets_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/secrets/main_test.go b/test/integration/secrets/main_test.go new file mode 100644 index 00000000000..ca7a7b43c77 --- /dev/null +++ b/test/integration/secrets/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package secrets + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/serviceaccount/BUILD b/test/integration/serviceaccount/BUILD index 5a589e720f3..4f2bf67d52c 100644 --- a/test/integration/serviceaccount/BUILD +++ b/test/integration/serviceaccount/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["service_account_test.go"], + size = "large", + srcs = [ + "main_test.go", + "service_account_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/serviceaccount/main_test.go b/test/integration/serviceaccount/main_test.go new file mode 100644 index 00000000000..d8a34a7762b --- /dev/null +++ b/test/integration/serviceaccount/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serviceaccount + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/storageclasses/BUILD b/test/integration/storageclasses/BUILD index 54a575e0107..c9935bb1644 100644 --- a/test/integration/storageclasses/BUILD +++ b/test/integration/storageclasses/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["storage_classes_test.go"], + size = "large", + srcs = [ + "main_test.go", + "storage_classes_test.go", + ], tags = [ "automanaged", "integration", diff --git a/test/integration/storageclasses/main_test.go b/test/integration/storageclasses/main_test.go new file mode 100644 index 00000000000..278bb7b77cb --- /dev/null +++ b/test/integration/storageclasses/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storageclasses + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/ttlcontroller/BUILD b/test/integration/ttlcontroller/BUILD index 806ce9feabc..d0be6d09066 100644 --- a/test/integration/ttlcontroller/BUILD +++ b/test/integration/ttlcontroller/BUILD @@ -9,7 +9,11 @@ load( go_test( name = "go_default_test", - srcs = ["ttlcontroller_test.go"], + size = "large", + srcs = [ + "main_test.go", + "ttlcontroller_test.go", + ], tags = [ "automanaged", "etcd", diff --git a/test/integration/ttlcontroller/main_test.go b/test/integration/ttlcontroller/main_test.go new file mode 100644 index 00000000000..0b75afed038 --- /dev/null +++ b/test/integration/ttlcontroller/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ttlcontroller + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/volume/BUILD b/test/integration/volume/BUILD index dc9eb9f2651..bc91e604373 100644 --- a/test/integration/volume/BUILD +++ b/test/integration/volume/BUILD @@ -9,8 +9,10 @@ load( go_test( name = "go_default_test", + size = "large", srcs = [ "attach_detach_test.go", + "main_test.go", "persistent_volumes_test.go", ], tags = [ diff --git a/test/integration/volume/main_test.go b/test/integration/volume/main_test.go new file mode 100644 index 00000000000..f3c945d05c6 --- /dev/null +++ b/test/integration/volume/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package volume + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/third_party/etcd.BUILD b/third_party/etcd.BUILD new file mode 100644 index 00000000000..816ffa75254 --- /dev/null +++ b/third_party/etcd.BUILD @@ -0,0 +1 @@ +exports_files(["etcd"]) diff --git a/vendor/BUILD b/vendor/BUILD index cf837e14ce8..74820b9c217 100644 --- a/vendor/BUILD +++ b/vendor/BUILD @@ -19,6 +19,7 @@ filegroup( "//vendor/cloud.google.com/go/internal:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:all-srcs", + "//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:all-srcs", "//vendor/github.com/Azure/azure-sdk-for-go/storage:all-srcs", diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/BUILD b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/BUILD new file mode 100644 index 00000000000..dfc4bdfa13c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/BUILD @@ -0,0 +1,40 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = [ + "client.go", + "disks.go", + "models.go", + "snapshots.go", + "version.go", + ], + tags = ["automanaged"], + deps = [ + "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/date:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/validation:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go new file mode 100644 index 00000000000..8bab7acc132 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/client.go @@ -0,0 +1,53 @@ +// Package disk implements the Azure ARM Disk service API version +// 2016-04-30-preview. +// +// The Disk Resource Provider Client. +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
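+//
+// Typical construction (an illustrative sketch, not part of the generated
+// code; the subscription ID is a placeholder and the authorizer must be
+// supplied by the caller):
+//
+//	client := NewDisksClient("<subscriptionID>")
+//	client.Authorizer = authorizer // any autorest.Authorizer supplied by the caller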
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// DefaultBaseURI is the default URI used for the service Disk
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// ManagementClient is the base client for Disk.
+type ManagementClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the ManagementClient client.
+func New(subscriptionID string) ManagementClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the ManagementClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient {
+	return ManagementClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go
new file mode 100644
index 00000000000..4f7fce74fc4
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/disks.go
@@ -0,0 +1,728 @@
+package disk
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// DisksClient is the Disk Resource Provider Client.
+type DisksClient struct {
+	ManagementClient
+}
+
+// NewDisksClient creates an instance of the DisksClient client.
+func NewDisksClient(subscriptionID string) DisksClient {
+	return NewDisksClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDisksClientWithBaseURI creates an instance of the DisksClient client.
+func NewDisksClientWithBaseURI(baseURI string, subscriptionID string) DisksClient {
+	return DisksClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a disk. This method may poll for
+// completion. Polling can be canceled by passing the cancel channel argument.
+// The channel will be used to cancel polling and any outstanding HTTP
+// requests.
+//
+// resourceGroupName is the name of the resource group. diskName is the name of
+// the disk within the given subscription and resource group. diskParameter is
+// disk object supplied in the body of the Put disk operation.
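+//
+// A minimal illustrative call (a sketch, not generated code; the group and
+// disk names are hypothetical, and a real disk also needs Properties such as
+// CreationData):
+//
+//	resultChan, errChan := client.CreateOrUpdate("myGroup", "myDisk", model, nil)
+//	if err := <-errChan; err != nil {
+//		// handle the error
+//	}
+//	created := <-resultChan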
+func (client DisksClient) CreateOrUpdate(resourceGroupName string, diskName string, diskParameter Model, cancel <-chan struct{}) (<-chan Model, <-chan error) { + resultChan := make(chan Model, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: diskParameter, + Constraints: []validation.Constraint{{Target: "diskParameter.Properties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "diskParameter.Properties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "diskParameter.Properties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "diskParameter.Properties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.DisksClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Model + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, diskName, diskParameter, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
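+// The prepared request is a PUT of the JSON-encoded disk to
+// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}
+// with api-version 2016-04-30-preview.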
+func (client DisksClient) CreateOrUpdatePreparer(resourceGroupName string, diskName string, diskParameter Model, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(diskParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DisksClient) CreateOrUpdateResponder(resp *http.Response) (result Model, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a disk. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used to +// cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. +func (client DisksClient) Delete(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, diskName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
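+// The prepared request is a DELETE (no body) against the same
+// .../providers/Microsoft.Compute/disks/{diskName} path used by CreateOrUpdate.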
+func (client DisksClient) DeletePreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DisksClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a disk. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. +func (client DisksClient) Get(resourceGroupName string, diskName string) (result Model, err error) { + req, err := client.GetPreparer(resourceGroupName, diskName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DisksClient) GetPreparer(resourceGroupName string, diskName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
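+// Unlike the senders for the long-running operations, it does not wrap the
+// send in DoPollForAsynchronous, so the call returns after a single round
+// trip.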
+func (client DisksClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DisksClient) GetResponder(resp *http.Response) (result Model, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a disk. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. grantAccessData +// is access data object supplied in the body of the get disk access operation. +func (client DisksClient) GrantAccess(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) { + resultChan := make(chan AccessURI, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.DisksClient", "GrantAccess") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AccessURI + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GrantAccessPreparer(resourceGroupName, diskName, grantAccessData, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", nil, "Failure preparing request") + return + } + + resp, err := client.GrantAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", resp, "Failure sending request") + return + } + + result, err = client.GrantAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "GrantAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GrantAccessPreparer prepares the GrantAccess request. 
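+// The prepared request is a POST of the JSON-encoded GrantAccessData to
+// .../providers/Microsoft.Compute/disks/{diskName}/beginGetAccess.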
+func (client DisksClient) GrantAccessPreparer(resourceGroupName string, diskName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) GrantAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists all the disks under a subscription. +func (client DisksClient) List() (result ListType, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DisksClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. 
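+// The decoded ListType carries a NextLink; ListNextResults below uses it to
+// fetch the next page and returns an empty result when no pages remain.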
+func (client DisksClient) ListResponder(resp *http.Response) (result ListType, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client DisksClient) ListNextResults(lastResults ListType) (result ListType, err error) { + req, err := lastResults.ListTypePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup lists all the disks under a resource group. +// +// resourceGroupName is the name of the resource group. +func (client DisksClient) ListByResourceGroup(resourceGroupName string) (result ListType, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DisksClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (client DisksClient) ListByResourceGroupResponder(resp *http.Response) (result ListType, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client DisksClient) ListByResourceGroupNextResults(lastResults ListType) (result ListType, err error) { + req, err := lastResults.ListTypePreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// RevokeAccess revokes access to a disk. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. +func (client DisksClient) RevokeAccess(resourceGroupName string, diskName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RevokeAccessPreparer(resourceGroupName, diskName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", resp, "Failure sending request") + return + } + + result, err = client.RevokeAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "RevokeAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RevokeAccessPreparer prepares the RevokeAccess request. 
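+// The prepared request is a POST with no body to
+// .../providers/Microsoft.Compute/disks/{diskName}/endGetAccess.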
+func (client DisksClient) RevokeAccessPreparer(resourceGroupName string, diskName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client DisksClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a disk. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. diskName is the name of +// the disk within the given subscription and resource group. diskParameter is +// disk object supplied in the body of the Patch disk operation. +func (client DisksClient) Update(resourceGroupName string, diskName string, diskParameter UpdateType, cancel <-chan struct{}) (<-chan Model, <-chan error) { + resultChan := make(chan Model, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Model + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, diskName, diskParameter, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.DisksClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. 
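+// The prepared request is a PATCH of the JSON-encoded UpdateType to the
+// .../providers/Microsoft.Compute/disks/{diskName} path.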
+func (client DisksClient) UpdatePreparer(resourceGroupName string, diskName string, diskParameter UpdateType, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "diskName": autorest.Encode("path", diskName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}", pathParameters), + autorest.WithJSON(diskParameter), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client DisksClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client DisksClient) UpdateResponder(resp *http.Response) (result Model, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go new file mode 100644 index 00000000000..e8118696ad5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/models.go @@ -0,0 +1,278 @@ +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessLevel enumerates the values for access level. +type AccessLevel string + +const ( + // None specifies the none state for access level. + None AccessLevel = "None" + // Read specifies the read state for access level. + Read AccessLevel = "Read" +) + +// CreateOption enumerates the values for create option. +type CreateOption string + +const ( + // Attach specifies the attach state for create option. + Attach CreateOption = "Attach" + // Copy specifies the copy state for create option. 
+	Copy CreateOption = "Copy"
+	// Empty specifies the empty state for create option.
+	Empty CreateOption = "Empty"
+	// FromImage specifies the from image state for create option.
+	FromImage CreateOption = "FromImage"
+	// Import specifies the import state for create option.
+	Import CreateOption = "Import"
+	// Restore specifies the restore state for create option.
+	Restore CreateOption = "Restore"
+)
+
+// OperatingSystemTypes enumerates the values for operating system types.
+type OperatingSystemTypes string
+
+const (
+	// Linux specifies the linux state for operating system types.
+	Linux OperatingSystemTypes = "Linux"
+	// Windows specifies the windows state for operating system types.
+	Windows OperatingSystemTypes = "Windows"
+)
+
+// StorageAccountTypes enumerates the values for storage account types.
+type StorageAccountTypes string
+
+const (
+	// PremiumLRS specifies the premium lrs state for storage account types.
+	PremiumLRS StorageAccountTypes = "Premium_LRS"
+	// StandardLRS specifies the standard lrs state for storage account types.
+	StandardLRS StorageAccountTypes = "Standard_LRS"
+)
+
+// AccessURI is a disk access SAS uri.
+type AccessURI struct {
+	autorest.Response `json:"-"`
+	*AccessURIOutput  `json:"properties,omitempty"`
+}
+
+// AccessURIOutput is Azure properties, including output.
+type AccessURIOutput struct {
+	*AccessURIRaw `json:"output,omitempty"`
+}
+
+// AccessURIRaw is the object that gets 'bubbled up' through flattening.
+type AccessURIRaw struct {
+	AccessSAS *string `json:"accessSAS,omitempty"`
+}
+
+// APIError is an api error.
+type APIError struct {
+	Details    *[]APIErrorBase `json:"details,omitempty"`
+	Innererror *InnerError     `json:"innererror,omitempty"`
+	Code       *string         `json:"code,omitempty"`
+	Target     *string         `json:"target,omitempty"`
+	Message    *string         `json:"message,omitempty"`
+}
+
+// APIErrorBase is an api error base.
+type APIErrorBase struct {
+	Code    *string `json:"code,omitempty"`
+	Target  *string `json:"target,omitempty"`
+	Message *string `json:"message,omitempty"`
+}
+
+// CreationData is data used when creating a disk.
+type CreationData struct {
+	CreateOption     CreateOption        `json:"createOption,omitempty"`
+	StorageAccountID *string             `json:"storageAccountId,omitempty"`
+	ImageReference   *ImageDiskReference `json:"imageReference,omitempty"`
+	SourceURI        *string             `json:"sourceUri,omitempty"`
+	SourceResourceID *string             `json:"sourceResourceId,omitempty"`
+}
+
+// EncryptionSettings is encryption settings for a disk or snapshot.
+type EncryptionSettings struct {
+	Enabled           *bool                       `json:"enabled,omitempty"`
+	DiskEncryptionKey *KeyVaultAndSecretReference `json:"diskEncryptionKey,omitempty"`
+	KeyEncryptionKey  *KeyVaultAndKeyReference    `json:"keyEncryptionKey,omitempty"`
+}
+
+// GrantAccessData is data used for requesting a SAS.
+type GrantAccessData struct {
+	Access            AccessLevel `json:"access,omitempty"`
+	DurationInSeconds *int32      `json:"durationInSeconds,omitempty"`
+}
+
+// ImageDiskReference is the source image used for creating the disk.
+type ImageDiskReference struct {
+	ID  *string `json:"id,omitempty"`
+	Lun *int32  `json:"lun,omitempty"`
+}
+
+// InnerError is inner error details.
+type InnerError struct { + Exceptiontype *string `json:"exceptiontype,omitempty"` + Errordetail *string `json:"errordetail,omitempty"` +} + +// KeyVaultAndKeyReference is key Vault Key Url and vault id of KeK, KeK is +// optional and when provided is used to unwrap the encryptionKey +type KeyVaultAndKeyReference struct { + SourceVault *SourceVault `json:"sourceVault,omitempty"` + KeyURL *string `json:"keyUrl,omitempty"` +} + +// KeyVaultAndSecretReference is key Vault Secret Url and vault id of the +// encryption key +type KeyVaultAndSecretReference struct { + SourceVault *SourceVault `json:"sourceVault,omitempty"` + SecretURL *string `json:"secretUrl,omitempty"` +} + +// ListType is the List Disks operation response. +type ListType struct { + autorest.Response `json:"-"` + Value *[]Model `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// ListTypePreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client ListType) ListTypePreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// Model is disk resource. +type Model struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *Properties `json:"properties,omitempty"` +} + +// OperationStatusResponse is operation status response +type OperationStatusResponse struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Status *string `json:"status,omitempty"` + StartTime *date.Time `json:"startTime,omitempty"` + EndTime *date.Time `json:"endTime,omitempty"` + Error *APIError `json:"error,omitempty"` +} + +// Properties is disk resource properties. +type Properties struct { + AccountType StorageAccountTypes `json:"accountType,omitempty"` + TimeCreated *date.Time `json:"timeCreated,omitempty"` + OsType OperatingSystemTypes `json:"osType,omitempty"` + CreationData *CreationData `json:"creationData,omitempty"` + DiskSizeGB *int32 `json:"diskSizeGB,omitempty"` + EncryptionSettings *EncryptionSettings `json:"encryptionSettings,omitempty"` + OwnerID *string `json:"ownerId,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// Resource is the Resource model definition. +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceUpdate is the Resource model definition. +type ResourceUpdate struct { + Tags *map[string]*string `json:"tags,omitempty"` +} + +// Snapshot is snapshot resource. +type Snapshot struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + *Properties `json:"properties,omitempty"` +} + +// SnapshotList is the List Snapshots operation response. 
+type SnapshotList struct {
+	autorest.Response `json:"-"`
+	Value             *[]Snapshot `json:"value,omitempty"`
+	NextLink          *string     `json:"nextLink,omitempty"`
+}
+
+// SnapshotListPreparer prepares a request to retrieve the next set of results. It returns
+// nil if no more results exist.
+func (client SnapshotList) SnapshotListPreparer() (*http.Request, error) {
+	if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 {
+		return nil, nil
+	}
+	return autorest.Prepare(&http.Request{},
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(client.NextLink)))
+}
+
+// SnapshotUpdate is snapshot update resource.
+type SnapshotUpdate struct {
+	Tags              *map[string]*string `json:"tags,omitempty"`
+	*UpdateProperties `json:"properties,omitempty"`
+}
+
+// SourceVault is the vault id, an Azure Resource Manager Resource id in the
+// form
+// /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}
+type SourceVault struct {
+	ID *string `json:"id,omitempty"`
+}
+
+// UpdateProperties is disk resource update properties.
+type UpdateProperties struct {
+	AccountType        StorageAccountTypes  `json:"accountType,omitempty"`
+	OsType             OperatingSystemTypes `json:"osType,omitempty"`
+	CreationData       *CreationData        `json:"creationData,omitempty"`
+	DiskSizeGB         *int32               `json:"diskSizeGB,omitempty"`
+	EncryptionSettings *EncryptionSettings  `json:"encryptionSettings,omitempty"`
+}
+
+// UpdateType is disk update resource.
+type UpdateType struct {
+	Tags              *map[string]*string `json:"tags,omitempty"`
+	*UpdateProperties `json:"properties,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go
new file mode 100644
index 00000000000..f4e5579d045
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/snapshots.go
@@ -0,0 +1,733 @@
+package disk
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0
+// Changes may cause incorrect behavior and will be lost if the code is
+// regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"net/http"
+)
+
+// SnapshotsClient is the Disk Resource Provider Client.
+type SnapshotsClient struct {
+	ManagementClient
+}
+
+// NewSnapshotsClient creates an instance of the SnapshotsClient client.
+func NewSnapshotsClient(subscriptionID string) SnapshotsClient {
+	return NewSnapshotsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewSnapshotsClientWithBaseURI creates an instance of the SnapshotsClient
+// client.
+func NewSnapshotsClientWithBaseURI(baseURI string, subscriptionID string) SnapshotsClient {
+	return SnapshotsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a snapshot.
This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +// snapshot is snapshot object supplied in the body of the Put disk operation. +func (client SnapshotsClient) CreateOrUpdate(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: snapshot, + Constraints: []validation.Constraint{{Target: "snapshot.Properties", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData.ImageReference", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.CreationData.ImageReference.ID", Name: validation.Null, Rule: true, Chain: nil}}}, + }}, + {Target: "snapshot.Properties.EncryptionSettings", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.Properties.EncryptionSettings.DiskEncryptionKey.SecretURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + {Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey.SourceVault", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "snapshot.Properties.EncryptionSettings.KeyEncryptionKey.KeyURL", Name: validation.Null, Rule: true, Chain: nil}, + }}, + }}, + }}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.SnapshotsClient", "CreateOrUpdate") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result Snapshot + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.CreateOrUpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
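+// The prepared request is a PUT of the JSON-encoded snapshot to
+// .../providers/Microsoft.Compute/snapshots/{snapshotName}, using the same
+// 2016-04-30-preview api-version as the disk operations.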
+func (client SnapshotsClient) CreateOrUpdatePreparer(resourceGroupName string, snapshotName string, snapshot Snapshot, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) CreateOrUpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a snapshot. This method may poll for completion. Polling can +// be canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +func (client SnapshotsClient) Delete(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.DeletePreparer(resourceGroupName, snapshotName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Delete", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// DeletePreparer prepares the Delete request. 
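+// The prepared request is a DELETE (no body) against the same
+// .../providers/Microsoft.Compute/snapshots/{snapshotName} path used by
+// CreateOrUpdate.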
+func (client SnapshotsClient) DeletePreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) DeleteResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets information about a snapshot. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +func (client SnapshotsClient) Get(resourceGroupName string, snapshotName string) (result Snapshot, err error) { + req, err := client.GetPreparer(resourceGroupName, snapshotName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client SnapshotsClient) GetPreparer(resourceGroupName string, snapshotName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GetResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GrantAccess grants access to a snapshot. This method may poll for +// completion. Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +// grantAccessData is access data object supplied in the body of the get +// snapshot access operation. 
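
// [Editor's note] GrantAccess is the only operation in this file with an
// up-front validation step: grantAccessData.DurationInSeconds must be
// non-nil, and a nil value fails fast on the error channel before any HTTP
// request is made. A sketch; the GrantAccessData and AccessURI field shapes
// are assumptions based on the package's generated models file, which is not
// part of this excerpt:
//
//	seconds := int32(3600)
//	data := disk.GrantAccessData{DurationInSeconds: &seconds} // access-level field omitted here
//	uriChan, errChan := client.GrantAccess("example-rg", "example-snapshot", data, nil)
//	if err := <-errChan; err != nil {
//		return err // includes the fast-fail validation error
//	}
//	uri := <-uriChan // AccessURI carrying the time-limited SAS for the snapshot
//	_ = uri
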
+func (client SnapshotsClient) GrantAccess(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (<-chan AccessURI, <-chan error) { + resultChan := make(chan AccessURI, 1) + errChan := make(chan error, 1) + if err := validation.Validate([]validation.Validation{ + {TargetValue: grantAccessData, + Constraints: []validation.Constraint{{Target: "grantAccessData.DurationInSeconds", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + errChan <- validation.NewErrorWithValidationError(err, "disk.SnapshotsClient", "GrantAccess") + close(errChan) + close(resultChan) + return resultChan, errChan + } + + go func() { + var err error + var result AccessURI + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.GrantAccessPreparer(resourceGroupName, snapshotName, grantAccessData, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", nil, "Failure preparing request") + return + } + + resp, err := client.GrantAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", resp, "Failure sending request") + return + } + + result, err = client.GrantAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "GrantAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// GrantAccessPreparer prepares the GrantAccess request. +func (client SnapshotsClient) GrantAccessPreparer(resourceGroupName string, snapshotName string, grantAccessData GrantAccessData, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/beginGetAccess", pathParameters), + autorest.WithJSON(grantAccessData), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GrantAccessSender sends the GrantAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) GrantAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GrantAccessResponder handles the response to the GrantAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) GrantAccessResponder(resp *http.Response) (result AccessURI, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists snapshots under a subscription. 
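
// [Editor's note] The preparers build request URLs by template expansion, and
// autorest.Encode("path", ...) escapes each value before substitution, so
// resource names survive characters such as spaces. Roughly:
//
//	// template: .../snapshots/{snapshotName}/beginGetAccess
//	// with resourceGroupName "my rg" and snapshotName "snap-1" this expands to:
//	// .../resourceGroups/my%20rg/providers/Microsoft.Compute/snapshots/snap-1/beginGetAccess
//
// Note the REST naming asymmetry: GrantAccess POSTs to .../beginGetAccess
// (above), while RevokeAccess POSTs to .../endGetAccess (later in this file).
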
+func (client SnapshotsClient) List() (result SnapshotList, err error) { + req, err := client.ListPreparer() + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client SnapshotsClient) ListPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) ListNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.SnapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure sending next results request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "List", resp, "Failure responding to next results request") + } + + return +} + +// ListByResourceGroup lists snapshots under a resource group. +// +// resourceGroupName is the name of the resource group. 
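
// [Editor's note] List returns a single page; ListNextResults fetches the
// next one and signals exhaustion by returning an empty result with a nil
// error once SnapshotListPreparer yields a nil request. One plausible drain
// loop, assuming SnapshotList exposes Value (*[]Snapshot) and NextLink
// (*string) as in the generated models (not shown in this diff):
//
//	page, err := client.List()
//	for err == nil && page.Value != nil {
//		for _, s := range *page.Value {
//			process(s) // hypothetical consumer
//		}
//		if page.NextLink == nil || *page.NextLink == "" {
//			break
//		}
//		page, err = client.ListNextResults(page)
//	}
//	if err != nil {
//		return err
//	}
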
+func (client SnapshotsClient) ListByResourceGroup(resourceGroupName string) (result SnapshotList, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client SnapshotsClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) ListByResourceGroupResponder(resp *http.Response) (result SnapshotList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client SnapshotsClient) ListByResourceGroupNextResults(lastResults SnapshotList) (result SnapshotList, err error) { + req, err := lastResults.SnapshotListPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure sending next results request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "ListByResourceGroup", resp, "Failure responding to next results request") + } + + return +} + +// RevokeAccess revokes access to a snapshot. This method may poll for +// completion. 
Polling can be canceled by passing the cancel channel argument. +// The channel will be used to cancel polling and any outstanding HTTP +// requests. +// +// resourceGroupName is the name of the resource group. snapshotName is the +// name of the snapshot within the given subscription and resource group. +func (client SnapshotsClient) RevokeAccess(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (<-chan OperationStatusResponse, <-chan error) { + resultChan := make(chan OperationStatusResponse, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result OperationStatusResponse + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.RevokeAccessPreparer(resourceGroupName, snapshotName, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", nil, "Failure preparing request") + return + } + + resp, err := client.RevokeAccessSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", resp, "Failure sending request") + return + } + + result, err = client.RevokeAccessResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "RevokeAccess", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// RevokeAccessPreparer prepares the RevokeAccess request. +func (client SnapshotsClient) RevokeAccessPreparer(resourceGroupName string, snapshotName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}/endGetAccess", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// RevokeAccessSender sends the RevokeAccess request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) RevokeAccessSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// RevokeAccessResponder handles the response to the RevokeAccess request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) RevokeAccessResponder(resp *http.Response) (result OperationStatusResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates (patches) a snapshot. This method may poll for completion. +// Polling can be canceled by passing the cancel channel argument. The channel +// will be used to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. 
snapshotName is the +// name of the snapshot within the given subscription and resource group. +// snapshot is snapshot object supplied in the body of the Patch snapshot +// operation. +func (client SnapshotsClient) Update(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (<-chan Snapshot, <-chan error) { + resultChan := make(chan Snapshot, 1) + errChan := make(chan error, 1) + go func() { + var err error + var result Snapshot + defer func() { + resultChan <- result + errChan <- err + close(resultChan) + close(errChan) + }() + req, err := client.UpdatePreparer(resourceGroupName, snapshotName, snapshot, cancel) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "disk.SnapshotsClient", "Update", resp, "Failure responding to request") + } + }() + return resultChan, errChan +} + +// UpdatePreparer prepares the Update request. +func (client SnapshotsClient) UpdatePreparer(resourceGroupName string, snapshotName string, snapshot SnapshotUpdate, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "snapshotName": autorest.Encode("path", snapshotName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-04-30-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/snapshots/{snapshotName}", pathParameters), + autorest.WithJSON(snapshot), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client SnapshotsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client SnapshotsClient) UpdateResponder(resp *http.Response) (result Snapshot, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go new file mode 100644 index 00000000000..a78b351a0e3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/disk/version.go @@ -0,0 +1,29 @@ +package disk + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 1.0.1.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/v10.0.2-beta arm-disk/2016-04-30-preview" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return "v10.0.2-beta" +} diff --git a/vendor/github.com/Azure/go-ansiterm/BUILD b/vendor/github.com/Azure/go-ansiterm/BUILD index 6da8d8fba4e..edc2de55fd0 100644 --- a/vendor/github.com/Azure/go-ansiterm/BUILD +++ b/vendor/github.com/Azure/go-ansiterm/BUILD @@ -22,7 +22,10 @@ go_library( "parser.go", "parser_action_helpers.go", "parser_actions.go", + "parser_test_helpers.go", + "parser_test_utilities.go", "states.go", + "test_event_handler.go", "utilities.go", ], tags = ["automanaged"], diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md index 261c041e7ab..e25e3821016 100644 --- a/vendor/github.com/Azure/go-ansiterm/README.md +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -7,6 +7,3 @@ For example the parser might receive "ESC, [, A" as a stream of three characters The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). See parser_test.go for examples exercising the state machine and generating appropriate function calls. - ------ -This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go index 96504a33bc9..ebfce8a8d38 100644 --- a/vendor/github.com/Azure/go-ansiterm/constants.go +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -124,32 +124,32 @@ func getByteRange(start byte, end byte) []byte { return bytes } -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() +var ToGroundBytes = getToGroundBytes() +var Executors = getExecuteBytes() // SPACE 20+A0 hex Always and everywhere a blank space // Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) +var Intermeds = getByteRange(0x20, 0x2F) // Parameters 30-3F hex 0123456789:;<=>? 
// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) +var CsiParams = getByteRange(0x30, 0x3F) -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) +var CsiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) // Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) +var UpperCase = getByteRange(0x40, 0x5F) // Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) +var LowerCase = getByteRange(0x60, 0x7E) // Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) +var Alphabetics = append(UpperCase, LowerCase...) -var printables = getByteRange(0x20, 0x7F) +var Printables = getByteRange(0x20, 0x7F) -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() +var EscapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var EscapeToGroundBytes = getEscapeToGroundBytes() // See http://www.vt100.net/emu/vt500_parser.png for description of the complex // byte ranges below diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go index 8d66e777c03..d55cc2aec79 100644 --- a/vendor/github.com/Azure/go-ansiterm/context.go +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -1,6 +1,6 @@ package ansiterm -type ansiContext struct { +type AnsiContext struct { currentChar byte paramBuffer []byte interBuffer []byte diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go index 1bd6057da8a..9fd4bd28e23 100644 --- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -1,41 +1,41 @@ package ansiterm -type csiEntryState struct { - baseState +type CsiEntryState struct { + BaseState } -func (csiState csiEntryState) Handle(b byte) (s state, e error) { +func (csiState CsiEntryState) Handle(b byte) (s State, e error) { logger.Infof("CsiEntry::Handle %#x", b) - nextState, err := csiState.baseState.Handle(b) + nextState, err := csiState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): + case sliceContains(Alphabetics, b): + return csiState.parser.Ground, nil + case sliceContains(CsiCollectables, b): + return csiState.parser.CsiParam, nil + case sliceContains(Executors, b): return csiState, csiState.parser.execute() } return csiState, nil } -func (csiState csiEntryState) Transition(s state) error { +func (csiState CsiEntryState) Transition(s State) error { logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) + csiState.BaseState.Transition(s) switch s { - case csiState.parser.ground: + case csiState.parser.Ground: return csiState.parser.csiDispatch() - case csiState.parser.csiParam: + case csiState.parser.CsiParam: switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): + case sliceContains(CsiParams, csiState.parser.context.currentChar): csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): + case sliceContains(Intermeds, csiState.parser.context.currentChar): csiState.parser.collectInter() } } @@ -43,7 +43,7 
@@ func (csiState csiEntryState) Transition(s state) error { return nil } -func (csiState csiEntryState) Enter() error { +func (csiState CsiEntryState) Enter() error { csiState.parser.clear() return nil } diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go index 4be35c5fd2a..27807dd35b9 100644 --- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -1,36 +1,36 @@ package ansiterm -type csiParamState struct { - baseState +type CsiParamState struct { + BaseState } -func (csiState csiParamState) Handle(b byte) (s state, e error) { +func (csiState CsiParamState) Handle(b byte) (s State, e error) { logger.Infof("CsiParam::Handle %#x", b) - nextState, err := csiState.baseState.Handle(b) + nextState, err := csiState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): + case sliceContains(Alphabetics, b): + return csiState.parser.Ground, nil + case sliceContains(CsiCollectables, b): csiState.parser.collectParam() return csiState, nil - case sliceContains(executors, b): + case sliceContains(Executors, b): return csiState, csiState.parser.execute() } return csiState, nil } -func (csiState csiParamState) Transition(s state) error { +func (csiState CsiParamState) Transition(s State) error { logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) + csiState.BaseState.Transition(s) switch s { - case csiState.parser.ground: + case csiState.parser.Ground: return csiState.parser.csiDispatch() } diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go index 2189eb6b6b0..b14e0ce9773 100644 --- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -1,34 +1,34 @@ package ansiterm -type escapeIntermediateState struct { - baseState +type EscapeIntermediateState struct { + BaseState } -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) +func (escState EscapeIntermediateState) Handle(b byte) (s State, e error) { + logger.Infof("EscapeIntermediateState::Handle %#x", b) + nextState, err := escState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { - case sliceContains(intermeds, b): + case sliceContains(Intermeds, b): return escState, escState.parser.collectInter() - case sliceContains(executors, b): + case sliceContains(Executors, b): return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil + case sliceContains(EscapeIntermediateToGroundBytes, b): + return escState.parser.Ground, nil } return escState, nil } -func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) +func (escState EscapeIntermediateState) Transition(s State) error { + logger.Infof("EscapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.BaseState.Transition(s) switch s { - case escState.parser.ground: + case 
escState.parser.Ground: return escState.parser.escDispatch() } diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go index 7b1b9ad3f12..232dd8e0d17 100644 --- a/vendor/github.com/Azure/go-ansiterm/escape_state.go +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -1,47 +1,47 @@ package ansiterm -type escapeState struct { - baseState +type EscapeState struct { + BaseState } -func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) +func (escState EscapeState) Handle(b byte) (s State, e error) { + logger.Infof("EscapeState::Handle %#x", b) + nextState, err := escState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil + return escState.parser.CsiEntry, nil case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): + return escState.parser.OscString, nil + case sliceContains(Executors, b): return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil + case sliceContains(EscapeToGroundBytes, b): + return escState.parser.Ground, nil + case sliceContains(Intermeds, b): + return escState.parser.EscapeIntermediate, nil } return escState, nil } -func (escState escapeState) Transition(s state) error { +func (escState EscapeState) Transition(s State) error { logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) + escState.BaseState.Transition(s) switch s { - case escState.parser.ground: + case escState.parser.Ground: return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: + case escState.parser.EscapeIntermediate: return escState.parser.collectInter() } return nil } -func (escState escapeState) Enter() error { +func (escState EscapeState) Enter() error { escState.parser.clear() return nil } diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go index 52451e94693..d600e3e4ae6 100644 --- a/vendor/github.com/Azure/go-ansiterm/ground_state.go +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -1,22 +1,22 @@ package ansiterm -type groundState struct { - baseState +type GroundState struct { + BaseState } -func (gs groundState) Handle(b byte) (s state, e error) { +func (gs GroundState) Handle(b byte) (s State, e error) { gs.parser.context.currentChar = b - nextState, err := gs.baseState.Handle(b) + nextState, err := gs.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { - case sliceContains(printables, b): + case sliceContains(Printables, b): return gs, gs.parser.print() - case sliceContains(executors, b): + case sliceContains(Executors, b): return gs, gs.parser.execute() } diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go index 24062d420eb..4490e3cdf6b 100644 --- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -1,19 +1,19 @@ package ansiterm -type oscStringState struct { - baseState +type OscStringState struct { + BaseState } -func (oscState oscStringState) Handle(b byte) (s state, e error) { 
+func (oscState OscStringState) Handle(b byte) (s State, e error) { logger.Infof("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) + nextState, err := oscState.BaseState.Handle(b) if nextState != nil || err != nil { return nextState, err } switch { case isOscStringTerminator(b): - return oscState.parser.ground, nil + return oscState.parser.Ground, nil } return oscState, nil diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go index 169f68dbefc..ef5e0ad1998 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -2,6 +2,7 @@ package ansiterm import ( "errors" + "fmt" "io/ioutil" "os" @@ -11,18 +12,18 @@ import ( var logger *logrus.Logger type AnsiParser struct { - currState state + currState State eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state + context *AnsiContext + CsiEntry State + CsiParam State + DcsEntry State + Escape State + EscapeIntermediate State + Error State + Ground State + OscString State + stateMap []State } func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { @@ -40,27 +41,27 @@ func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser parser := &AnsiParser{ eventHandler: evtHandler, - context: &ansiContext{}, + context: &AnsiContext{}, } - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} + parser.CsiEntry = CsiEntryState{BaseState{name: "CsiEntry", parser: parser}} + parser.CsiParam = CsiParamState{BaseState{name: "CsiParam", parser: parser}} + parser.DcsEntry = DcsEntryState{BaseState{name: "DcsEntry", parser: parser}} + parser.Escape = EscapeState{BaseState{name: "Escape", parser: parser}} + parser.EscapeIntermediate = EscapeIntermediateState{BaseState{name: "EscapeIntermediate", parser: parser}} + parser.Error = ErrorState{BaseState{name: "Error", parser: parser}} + parser.Ground = GroundState{BaseState{name: "Ground", parser: parser}} + parser.OscString = OscStringState{BaseState{name: "OscString", parser: parser}} - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, + parser.stateMap = []State{ + parser.CsiEntry, + parser.CsiParam, + parser.DcsEntry, + parser.Escape, + parser.EscapeIntermediate, + parser.Error, + parser.Ground, + parser.OscString, } parser.currState = getState(initialState, parser.stateMap) @@ -69,7 +70,7 @@ func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser return parser } -func getState(name string, states []state) state { +func getState(name string, states []State) State { for _, el := range states { if el.Name() == name { return el @@ -98,7 +99,7 @@ func (ap 
*AnsiParser) handle(b byte) error { if newState == nil { logger.Warning("newState is nil") - return errors.New("New state of 'nil' is invalid.") + return errors.New(fmt.Sprintf("New state of 'nil' is invalid.")) } if newState != ap.currState { @@ -110,7 +111,7 @@ func (ap *AnsiParser) handle(b byte) error { return nil } -func (ap *AnsiParser) changeState(newState state) error { +func (ap *AnsiParser) changeState(newState State) error { logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) // Exit old state diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go index 8b69a67a5aa..438802097dd 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -31,7 +31,7 @@ func parseParams(bytes []byte) ([]string, error) { return params, nil } -func parseCmd(context ansiContext) (string, error) { +func parseCmd(context AnsiContext) (string, error) { return string(context.currentChar), nil } diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go index 58750a2d2b1..260e6aae3c5 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -113,7 +113,7 @@ func (ap *AnsiParser) print() error { } func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} + ap.context = &AnsiContext{} return nil } diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_test_helpers.go new file mode 100644 index 00000000000..562f215d342 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_test_helpers.go @@ -0,0 +1,114 @@ +package ansiterm + +import ( + "fmt" + "testing" +) + +func getStateNames() []string { + parser, _ := createTestParser("Ground") + + stateNames := []string{} + for _, state := range parser.stateMap { + stateNames = append(stateNames, state.Name()) + } + + return stateNames +} + +func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) { + for _, b := range bytes { + bytes := []byte{byte(b)} + parser, _ := createTestParser(start) + parser.Parse(bytes) + validateState(t, parser.currState, end) + } +} + +func anyToXHelper(t *testing.T, bytes []byte, expectedState string) { + for _, s := range getStateNames() { + stateTransitionHelper(t, s, expectedState, bytes) + } +} + +func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) { + parser, evtHandler := createTestParser(start) + parser.Parse(bytes) + validateState(t, parser.currState, expected) + validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) +} + +func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) { + params, err := parseParams(bytes) + + if err != nil { + t.Errorf("Parameter parse error: %v", err) + return + } + + if len(params) != len(expectedParams) { + t.Errorf("Parsed parameters: %v", params) + t.Errorf("Expected parameters: %v", expectedParams) + t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams)) + return + } + + for i, v := range expectedParams { + if v != params[i] { + t.Errorf("Parsed parameters: %v", params) + t.Errorf("Expected parameters: %v", expectedParams) + t.Errorf("Parameter parse failure: %s != %s at position %d", v, params[i], i) + } + } +} + +func cursorSingleParamHelper(t *testing.T, command 
byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) +} + +func cursorTwoParamHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) +} + +func eraseHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)}) + funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) +} + +func scrollHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)}) + funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)}) +} + +func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) { + p, _ := createTestParser(start) + fillContext(p.context) + p.Parse(bytes) + validateState(t, p.currState, end) + validateEmptyContext(t, p.context) +} + +func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) { + parser, evtHandler := createTestParser("Ground") + parser.Parse(bytes) + validateState(t, parser.currState, expectedState) + validateFuncCalls(t, evtHandler.FunctionCalls, 
expectedCalls) +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_utilities.go b/vendor/github.com/Azure/go-ansiterm/parser_test_utilities.go new file mode 100644 index 00000000000..51d1d49bf03 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_test_utilities.go @@ -0,0 +1,66 @@ +package ansiterm + +import ( + "testing" +) + +func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) { + evtHandler := CreateTestAnsiEventHandler() + parser := CreateParser(s, evtHandler) + + return parser, evtHandler +} + +func validateState(t *testing.T, actualState State, expectedStateName string) { + actualName := "Nil" + + if actualState != nil { + actualName = actualState.Name() + } + + if actualName != expectedStateName { + t.Errorf("Invalid State: '%s' != '%s'", actualName, expectedStateName) + } +} + +func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) { + actualCount := len(actualCalls) + expectedCount := len(expectedCalls) + + if actualCount != expectedCount { + t.Errorf("Actual calls: %v", actualCalls) + t.Errorf("Expected calls: %v", expectedCalls) + t.Errorf("Call count error: %d != %d", actualCount, expectedCount) + return + } + + for i, v := range actualCalls { + if v != expectedCalls[i] { + t.Errorf("Actual calls: %v", actualCalls) + t.Errorf("Expected calls: %v", expectedCalls) + t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i])) + } + } +} + +func fillContext(context *AnsiContext) { + context.currentChar = 'A' + context.paramBuffer = []byte{'C', 'D', 'E'} + context.interBuffer = []byte{'F', 'G', 'H'} +} + +func validateEmptyContext(t *testing.T, context *AnsiContext) { + var expectedCurrChar byte = 0x0 + if context.currentChar != expectedCurrChar { + t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar) + } + + if len(context.paramBuffer) != 0 { + t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer) + } + + if len(context.paramBuffer) != 0 { + t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer) + } + +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go index f2ea1fcd12d..0cbdcb3c834 100644 --- a/vendor/github.com/Azure/go-ansiterm/states.go +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -1,52 +1,52 @@ package ansiterm -type stateID int +type StateId int -type state interface { +type State interface { Enter() error Exit() error - Handle(byte) (state, error) + Handle(byte) (State, error) Name() string - Transition(state) error + Transition(State) error } -type baseState struct { +type BaseState struct { name string parser *AnsiParser } -func (base baseState) Enter() error { +func (base BaseState) Enter() error { return nil } -func (base baseState) Exit() error { +func (base BaseState) Exit() error { return nil } -func (base baseState) Handle(b byte) (s state, e error) { +func (base BaseState) Handle(b byte) (s State, e error) { switch { case b == CSI_ENTRY: - return base.parser.csiEntry, nil + return base.parser.CsiEntry, nil case b == DCS_ENTRY: - return base.parser.dcsEntry, nil + return base.parser.DcsEntry, nil case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil + return base.parser.Escape, nil case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil + return base.parser.OscString, nil + case sliceContains(ToGroundBytes, b): + return base.parser.Ground, 
nil } return nil, nil } -func (base baseState) Name() string { +func (base BaseState) Name() string { return base.name } -func (base baseState) Transition(s state) error { - if s == base.parser.ground { +func (base BaseState) Transition(s State) error { + if s == base.parser.Ground { execBytes := []byte{0x18} execBytes = append(execBytes, 0x1A) execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) @@ -62,10 +62,10 @@ func (base baseState) Transition(s state) error { return nil } -type dcsEntryState struct { - baseState +type DcsEntryState struct { + BaseState } -type errorState struct { - baseState +type ErrorState struct { + BaseState } diff --git a/vendor/github.com/Azure/go-ansiterm/test_event_handler.go b/vendor/github.com/Azure/go-ansiterm/test_event_handler.go new file mode 100644 index 00000000000..60f9f30b98d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/test_event_handler.go @@ -0,0 +1,173 @@ +package ansiterm + +import ( + "fmt" + "strconv" +) + +type TestAnsiEventHandler struct { + FunctionCalls []string +} + +func CreateTestAnsiEventHandler() *TestAnsiEventHandler { + evtHandler := TestAnsiEventHandler{} + evtHandler.FunctionCalls = make([]string, 0) + return &evtHandler +} + +func (h *TestAnsiEventHandler) recordCall(call string, params []string) { + s := fmt.Sprintf("%s(%v)", call, params) + h.FunctionCalls = append(h.FunctionCalls, s) +} + +func (h *TestAnsiEventHandler) Print(b byte) error { + h.recordCall("Print", []string{string(b)}) + return nil +} + +func (h *TestAnsiEventHandler) Execute(b byte) error { + h.recordCall("Execute", []string{string(b)}) + return nil +} + +func (h *TestAnsiEventHandler) CUU(param int) error { + h.recordCall("CUU", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUD(param int) error { + h.recordCall("CUD", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUF(param int) error { + h.recordCall("CUF", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUB(param int) error { + h.recordCall("CUB", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CNL(param int) error { + h.recordCall("CNL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CPL(param int) error { + h.recordCall("CPL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CHA(param int) error { + h.recordCall("CHA", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) VPA(param int) error { + h.recordCall("VPA", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUP(x int, y int) error { + xS, yS := strconv.Itoa(x), strconv.Itoa(y) + h.recordCall("CUP", []string{xS, yS}) + return nil +} + +func (h *TestAnsiEventHandler) HVP(x int, y int) error { + xS, yS := strconv.Itoa(x), strconv.Itoa(y) + h.recordCall("HVP", []string{xS, yS}) + return nil +} + +func (h *TestAnsiEventHandler) DECTCEM(visible bool) error { + h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)}) + return nil +} + +func (h *TestAnsiEventHandler) DECOM(visible bool) error { + h.recordCall("DECOM", []string{strconv.FormatBool(visible)}) + return nil +} + +func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error { + h.recordCall("DECOLM", []string{strconv.FormatBool(use132)}) + return nil +} + +func (h *TestAnsiEventHandler) ED(param int) error { + h.recordCall("ED", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) EL(param 
int) error { + h.recordCall("EL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) IL(param int) error { + h.recordCall("IL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DL(param int) error { + h.recordCall("DL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) ICH(param int) error { + h.recordCall("ICH", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DCH(param int) error { + h.recordCall("DCH", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) SGR(params []int) error { + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.recordCall("SGR", strings) + return nil +} + +func (h *TestAnsiEventHandler) SU(param int) error { + h.recordCall("SU", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) SD(param int) error { + h.recordCall("SD", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DA(params []string) error { + h.recordCall("DA", params) + return nil +} + +func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error { + topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom) + h.recordCall("DECSTBM", []string{topS, bottomS}) + return nil +} + +func (h *TestAnsiEventHandler) RI() error { + h.recordCall("RI", nil) + return nil +} + +func (h *TestAnsiEventHandler) IND() error { + h.recordCall("IND", nil) + return nil +} + +func (h *TestAnsiEventHandler) Flush() error { + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go index daf2f069615..78fe92fe65f 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -9,7 +9,7 @@ import ( "strings" "syscall" - "github.com/Azure/go-ansiterm" + . "github.com/Azure/go-ansiterm" ) // Windows keyboard constants @@ -85,17 +85,17 @@ func newAnsiCommand(command []byte) *ansiCommand { if lastCharIndex != 0 { start := 1 // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY { start++ } // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP) } return ac } -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue SHORT) SHORT { if index < 0 || index >= len(ac.Parameters) { return defaultValue } @@ -105,7 +105,7 @@ func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { return defaultValue } - return int16(param) + return SHORT(param) } func (ac *ansiCommand) String() string { @@ -119,12 +119,12 @@ func (ac *ansiCommand) String() string { // See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. 
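
// [Editor's note] This vendor sync also switches winterm from a qualified
// import of github.com/Azure/go-ansiterm to a dot import, which is why the
// ANSI_* identifiers in the hunks above and below lose their ansiterm.
// qualifier. Dot imports are legal Go but generally discouraged; the
// equivalent qualified form would read:
//
//	import ansiterm "github.com/Azure/go-ansiterm"
//	...
//	case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
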
func isAnsiCommandChar(b byte) bool { switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY: return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM: // non-CSI escape sequence terminator return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + case b == ANSI_CMD_STR_TERM || b == ANSI_BEL: // String escape sequence terminator return true } @@ -132,11 +132,11 @@ func isAnsiCommandChar(b byte) bool { } func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) + return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL) } func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) + return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3) } // bytesToHex converts a slice of bytes to a human-readable string. @@ -150,7 +150,7 @@ func bytesToHex(b []byte) string { // ensureInRange adjusts the passed value, if necessary, to ensure it is within // the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { +func ensureInRange(n SHORT, min SHORT, max SHORT) SHORT { if n < min { return min } else if n > max { diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go index 462d92f8ef9..1f2f3853cc4 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -66,21 +66,21 @@ const ( // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). // Clearing all foreground or background colors results in black; setting all creates white. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F + FOREGROUND_BLUE WORD = 0x0001 + FOREGROUND_GREEN WORD = 0x0002 + FOREGROUND_RED WORD = 0x0004 + FOREGROUND_INTENSITY WORD = 0x0008 + FOREGROUND_MASK WORD = 0x000F - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 + BACKGROUND_BLUE WORD = 0x0010 + BACKGROUND_GREEN WORD = 0x0020 + BACKGROUND_RED WORD = 0x0040 + BACKGROUND_INTENSITY WORD = 0x0080 + BACKGROUND_MASK WORD = 0x00F0 - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 + COMMON_LVB_MASK WORD = 0xFF00 + COMMON_LVB_REVERSE_VIDEO WORD = 0x4000 + COMMON_LVB_UNDERSCORE WORD = 0x8000 // Input event types // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
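
// [Editor's note] The next hunk introduces named Windows core types (SHORT,
// BOOL, WORD, WCHAR, DWORD) and retypes the console structs and the constants
// above with them, replacing bare int16/int32/uint16/uint32. Because Go's
// typed constants do not convert implicitly, accidental mixing now fails at
// compile time, e.g.:
//
//	var attrs WORD = FOREGROUND_RED | FOREGROUND_INTENSITY // ok
//	var n uint16 = FOREGROUND_RED // compile error: WORD is not uint16
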
@@ -104,53 +104,60 @@ const ( ) // Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/aa383751(v=vs.85).aspx for core types (e.g., SHORT) // -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) // -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment type ( + SHORT int16 + BOOL int32 + WORD uint16 + WCHAR uint16 + DWORD uint32 + CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 + UnicodeChar WCHAR + Attributes WORD } CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 + Size DWORD + Visible BOOL } CONSOLE_SCREEN_BUFFER_INFO struct { Size COORD CursorPosition COORD - Attributes uint16 + Attributes WORD Window SMALL_RECT MaximumWindowSize COORD } COORD struct { - X int16 - Y int16 + X SHORT + Y SHORT } SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 + Left SHORT + Top SHORT + Right SHORT + Bottom SHORT } // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. INPUT_RECORD struct { - EventType uint16 + EventType WORD KeyEvent KEY_EVENT_RECORD } KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 + KeyDown BOOL + RepeatCount WORD + VirtualKeyCode WORD + VirtualScanCode WORD + UnicodeChar WCHAR + ControlKeyState DWORD } WINDOW_BUFFER_SIZE struct { @@ -158,12 +165,12 @@ type ( } ) -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { +// boolToBOOL converts a Go bool into a Windows BOOL. +func boolToBOOL(f bool) BOOL { if f { - return int32(1) + return BOOL(1) } else { - return int32(0) + return BOOL(0) } } @@ -235,7 +242,7 @@ func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { // SetConsoleTextAttribute sets the attributes of characters written to the // console screen buffer by the WriteFile or WriteConsole function. // See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { +func SetConsoleTextAttribute(handle uintptr, attribute WORD) error { r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) use(attribute) return checkError(r1, r2, err) @@ -273,7 +280,7 @@ func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) erro // It returns true if the handle was signaled; false otherwise. // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(DWORD(msWait))) switch r1 { case WAIT_ABANDONED, WAIT_TIMEOUT: return false, nil @@ -313,8 +320,8 @@ func checkError(r1, r2 uintptr, err error) error { // coordToPointer converts a COORD into a uintptr (by fooling the type system). func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to DWORD is just to get a pointer to pass. 
+ return uintptr(*((*DWORD)(unsafe.Pointer(&c)))) } // use is a no-op, but the compiler cannot see that it is. diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go index cbec8f728f4..94665db6fb0 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -2,7 +2,9 @@ package winterm -import "github.com/Azure/go-ansiterm" +import ( + . "github.com/Azure/go-ansiterm" +) const ( FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE @@ -11,83 +13,83 @@ const ( // collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the // request represented by the passed ANSI mode. -func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { +func collectAnsiIntoWindowsAttributes(windowsMode WORD, inverted bool, baseMode WORD, ansiMode SHORT) (WORD, bool) { switch ansiMode { // Mode styles - case ansiterm.ANSI_SGR_BOLD: + case ANSI_SGR_BOLD: windowsMode = windowsMode | FOREGROUND_INTENSITY - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + case ANSI_SGR_DIM, ANSI_SGR_BOLD_DIM_OFF: windowsMode &^= FOREGROUND_INTENSITY - case ansiterm.ANSI_SGR_UNDERLINE: + case ANSI_SGR_UNDERLINE: windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - case ansiterm.ANSI_SGR_REVERSE: + case ANSI_SGR_REVERSE: inverted = true - case ansiterm.ANSI_SGR_REVERSE_OFF: + case ANSI_SGR_REVERSE_OFF: inverted = false - case ansiterm.ANSI_SGR_UNDERLINE_OFF: + case ANSI_SGR_UNDERLINE_OFF: windowsMode &^= COMMON_LVB_UNDERSCORE // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + case ANSI_SGR_FOREGROUND_DEFAULT: windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + case ANSI_SGR_FOREGROUND_BLACK: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - case ansiterm.ANSI_SGR_FOREGROUND_RED: + case ANSI_SGR_FOREGROUND_RED: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + case ANSI_SGR_FOREGROUND_GREEN: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + case ANSI_SGR_FOREGROUND_YELLOW: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + case ANSI_SGR_FOREGROUND_BLUE: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + case ANSI_SGR_FOREGROUND_MAGENTA: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + case ANSI_SGR_FOREGROUND_CYAN: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + case ANSI_SGR_FOREGROUND_WHITE: windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + case ANSI_SGR_BACKGROUND_DEFAULT: // Black with no intensity windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + case ANSI_SGR_BACKGROUND_BLACK: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - case ansiterm.ANSI_SGR_BACKGROUND_RED: + case ANSI_SGR_BACKGROUND_RED: 
windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + case ANSI_SGR_BACKGROUND_GREEN: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + case ANSI_SGR_BACKGROUND_YELLOW: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + case ANSI_SGR_BACKGROUND_BLUE: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + case ANSI_SGR_BACKGROUND_MAGENTA: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + case ANSI_SGR_BACKGROUND_CYAN: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + case ANSI_SGR_BACKGROUND_WHITE: windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE } @@ -95,6 +97,6 @@ func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMod } // invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { +func invertAttributes(windowsMode WORD) WORD { return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go index f015723ade7..e4b1c255a46 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -3,11 +3,11 @@ package winterm const ( - horizontal = iota - vertical + Horizontal = iota + Vertical ) -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { +func (h *WindowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { if h.originMode { sr := h.effectiveSr(info.Window) return SMALL_RECT{ @@ -27,7 +27,7 @@ func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_IN } // setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { +func (h *WindowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { position.X = ensureInRange(position.X, window.Left, window.Right) position.Y = ensureInRange(position.Y, window.Top, window.Bottom) err := SetConsoleCursorPosition(h.fd, position) @@ -38,15 +38,15 @@ func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL return err } -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) +func (h *WindowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(Vertical, param) } -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) +func (h *WindowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(Horizontal, param) } -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { +func (h *WindowsAnsiEventHandler) moveCursor(moveMode int, param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err @@ -54,10 +54,10 @@ func (h 
*windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { position := info.CursorPosition switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) + case Horizontal: + position.X += SHORT(param) + case Vertical: + position.Y += SHORT(param) } if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { @@ -67,7 +67,7 @@ func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { return nil } -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { +func (h *WindowsAnsiEventHandler) moveCursorLine(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err @@ -75,7 +75,7 @@ func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { position := info.CursorPosition position.X = 0 - position.Y += int16(param) + position.Y += SHORT(param) if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { return err @@ -84,14 +84,14 @@ func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { return nil } -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { +func (h *WindowsAnsiEventHandler) moveCursorColumn(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err } position := info.CursorPosition - position.X = int16(param) - 1 + position.X = SHORT(param) - 1 if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { return err diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go index 244b5fa25ef..f02a5b261b5 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -2,9 +2,11 @@ package winterm -import "github.com/Azure/go-ansiterm" +import ( + . 
"github.com/Azure/go-ansiterm" +) -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { +func (h *WindowsAnsiEventHandler) clearRange(attributes WORD, fromCoord COORD, toCoord COORD) error { // Ignore an invalid (negative area) request if toCoord.Y < fromCoord.Y { return nil @@ -58,7 +60,7 @@ func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, return nil } -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { +func (h *WindowsAnsiEventHandler) clearRect(attributes WORD, fromCoord COORD, toCoord COORD) error { region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} width := toCoord.X - fromCoord.X + 1 height := toCoord.Y - fromCoord.Y + 1 @@ -70,7 +72,7 @@ func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, buffer := make([]CHAR_INFO, size) - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + char := CHAR_INFO{WCHAR(FILL_CHARACTER), attributes} for i := 0; i < int(size); i++ { buffer[i] = char } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go index 706d270577e..ed1998245c0 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -3,9 +3,9 @@ package winterm // effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) +func (h *WindowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := AddInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := AddInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) if top >= bottom { top = window.Top bottom = window.Bottom @@ -13,7 +13,7 @@ func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { return scrollRegion{top: top, bottom: bottom} } -func (h *windowsAnsiEventHandler) scrollUp(param int) error { +func (h *WindowsAnsiEventHandler) scrollUp(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err @@ -23,11 +23,11 @@ func (h *windowsAnsiEventHandler) scrollUp(param int) error { return h.scroll(param, sr, info) } -func (h *windowsAnsiEventHandler) scrollDown(param int) error { +func (h *WindowsAnsiEventHandler) scrollDown(param int) error { return h.scrollUp(-param) } -func (h *windowsAnsiEventHandler) deleteLines(param int) error { +func (h *WindowsAnsiEventHandler) deleteLines(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err @@ -44,12 +44,12 @@ func (h *windowsAnsiEventHandler) deleteLines(param int) error { } } -func (h *windowsAnsiEventHandler) insertLines(param int) error { +func (h *WindowsAnsiEventHandler) insertLines(param int) error { return h.deleteLines(-param) } // scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { +func (h *WindowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) @@ -64,7 +64,7 @@ func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSO // Origin to which area should be copied destOrigin := COORD{ X: 0, - Y: sr.top - int16(param), + Y: sr.top - SHORT(param), } char := CHAR_INFO{ @@ -78,7 +78,7 @@ func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSO return nil } -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { +func (h *WindowsAnsiEventHandler) deleteCharacters(param int) error { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { return err @@ -86,12 +86,12 @@ func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { return h.scrollLine(param, info.CursorPosition, info) } -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { +func (h *WindowsAnsiEventHandler) insertCharacters(param int) error { return h.deleteCharacters(-param) } // scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { +func (h *WindowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { // Copy from and clip to the scroll region (full buffer width) scrollRect := SMALL_RECT{ Top: position.Y, @@ -102,7 +102,7 @@ func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info * // Origin to which area should be copied destOrigin := COORD{ - X: position.X - int16(columns), + X: position.X - SHORT(columns), Y: position.Y, } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go index afa7635d77b..2f963ff132d 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -4,6 +4,6 @@ package winterm // AddInRange increments a value by the passed quantity while ensuring the values // always remain within the supplied min / max range. -func addInRange(n int16, increment int16, min int16, max int16) int16 { +func AddInRange(n SHORT, increment SHORT, min SHORT, max SHORT) SHORT { return ensureInRange(n+increment, min, max) } diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go index 4d858ed6111..2d492b32e42 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -8,19 +8,19 @@ import ( "os" "strconv" - "github.com/Azure/go-ansiterm" + . 
"github.com/Azure/go-ansiterm" "github.com/Sirupsen/logrus" ) var logger *logrus.Logger -type windowsAnsiEventHandler struct { +type WindowsAnsiEventHandler struct { fd uintptr file *os.File infoReset *CONSOLE_SCREEN_BUFFER_INFO sr scrollRegion buffer bytes.Buffer - attributes uint16 + attributes WORD inverted bool wrapNext bool drewMarginByte bool @@ -30,10 +30,10 @@ type windowsAnsiEventHandler struct { curPos COORD } -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { +func CreateWinEventHandler(fd uintptr, file *os.File) AnsiEventHandler { logFile := ioutil.Discard - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { logFile, _ = os.Create("winEventHandler.log") } @@ -48,7 +48,7 @@ func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler return nil } - return &windowsAnsiEventHandler{ + return &WindowsAnsiEventHandler{ fd: fd, file: file, infoReset: infoReset, @@ -57,8 +57,8 @@ func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler } type scrollRegion struct { - top int16 - bottom int16 + top SHORT + bottom SHORT } // simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the @@ -68,7 +68,7 @@ type scrollRegion struct { // // In the false case, the caller should ensure that a carriage return // and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { +func (h *WindowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { if h.wrapNext { if err := h.Flush(); err != nil { return false, err @@ -89,25 +89,24 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { h.updatePos(pos) } return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - logger.Info("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + } else { + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { return false, err } + logger.Info("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil } - return true, nil - } else if pos.Y < info.Window.Bottom { // Let Windows handle the LF. pos.Y++ @@ -134,7 +133,7 @@ func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { } // executeLF executes a LF without a CR. 
-func (h *windowsAnsiEventHandler) executeLF() error { +func (h *WindowsAnsiEventHandler) executeLF() error { handled, err := h.simulateLF(false) if err != nil { return err @@ -146,7 +145,7 @@ func (h *windowsAnsiEventHandler) executeLF() error { if err != nil { return err } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + h.buffer.WriteByte(ANSI_LINE_FEED) if pos.X != 0 { if err := h.Flush(); err != nil { return err @@ -160,7 +159,7 @@ func (h *windowsAnsiEventHandler) executeLF() error { return nil } -func (h *windowsAnsiEventHandler) Print(b byte) error { +func (h *WindowsAnsiEventHandler) Print(b byte) error { if h.wrapNext { h.buffer.WriteByte(h.marginByte) h.clearWrap() @@ -183,9 +182,9 @@ func (h *windowsAnsiEventHandler) Print(b byte) error { return nil } -func (h *windowsAnsiEventHandler) Execute(b byte) error { +func (h *WindowsAnsiEventHandler) Execute(b byte) error { switch b { - case ansiterm.ANSI_TAB: + case ANSI_TAB: logger.Info("Execute(TAB)") // Move to the next tab stop, but preserve auto-wrap if already set. if !h.wrapNext { @@ -206,11 +205,11 @@ func (h *windowsAnsiEventHandler) Execute(b byte) error { } return nil - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) + case ANSI_BEL: + h.buffer.WriteByte(ANSI_BEL) return nil - case ansiterm.ANSI_BACKSPACE: + case ANSI_BACKSPACE: if h.wrapNext { if err := h.Flush(); err != nil { return err @@ -224,15 +223,15 @@ func (h *windowsAnsiEventHandler) Execute(b byte) error { if pos.X > 0 { pos.X-- h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + h.buffer.WriteByte(ANSI_BACKSPACE) } return nil - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + case ANSI_VERTICAL_TAB, ANSI_FORM_FEED: // Treat as true LF. return h.executeLF() - case ansiterm.ANSI_LINE_FEED: + case ANSI_LINE_FEED: // Simulate a CR and LF for now since there is no way in go-ansiterm // to tell if the LF should include CR (and more things break when it's // missing than when it's incorrectly added). 
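Execute and executeLF above never write control bytes straight to the console: bytes accumulate in h.buffer and reach the file only on Flush, which lets the handler update its cached cursor position first. A reduced sketch of that buffer-then-flush shape (bufferedWriter is illustrative; the real handler also tracks cursor state):

```go
// Sketch of the buffer-then-flush pattern from win_event_handler.go.
package main

import (
	"bytes"
	"fmt"
	"os"
)

type bufferedWriter struct {
	buf  bytes.Buffer
	file *os.File
}

func (w *bufferedWriter) WriteByte(b byte) error {
	return w.buf.WriteByte(b) // staged, not yet visible
}

func (w *bufferedWriter) Flush() error {
	if w.buf.Len() == 0 {
		return nil
	}
	_, err := w.file.Write(w.buf.Bytes())
	w.buf.Reset()
	return err
}

func main() {
	w := &bufferedWriter{file: os.Stdout}
	w.WriteByte('h')
	w.WriteByte('i')
	w.WriteByte('\n')
	if err := w.Flush(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```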
@@ -240,9 +239,9 @@ func (h *windowsAnsiEventHandler) Execute(b byte) error { if handled || err != nil { return err } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + return h.buffer.WriteByte(ANSI_LINE_FEED) - case ansiterm.ANSI_CARRIAGE_RETURN: + case ANSI_CARRIAGE_RETURN: if h.wrapNext { if err := h.Flush(); err != nil { return err @@ -256,7 +255,7 @@ func (h *windowsAnsiEventHandler) Execute(b byte) error { if pos.X != 0 { pos.X = 0 h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + h.buffer.WriteByte(ANSI_CARRIAGE_RETURN) } return nil @@ -265,7 +264,7 @@ func (h *windowsAnsiEventHandler) Execute(b byte) error { } } -func (h *windowsAnsiEventHandler) CUU(param int) error { +func (h *WindowsAnsiEventHandler) CUU(param int) error { if err := h.Flush(); err != nil { return err } @@ -274,7 +273,7 @@ func (h *windowsAnsiEventHandler) CUU(param int) error { return h.moveCursorVertical(-param) } -func (h *windowsAnsiEventHandler) CUD(param int) error { +func (h *WindowsAnsiEventHandler) CUD(param int) error { if err := h.Flush(); err != nil { return err } @@ -283,7 +282,7 @@ func (h *windowsAnsiEventHandler) CUD(param int) error { return h.moveCursorVertical(param) } -func (h *windowsAnsiEventHandler) CUF(param int) error { +func (h *WindowsAnsiEventHandler) CUF(param int) error { if err := h.Flush(); err != nil { return err } @@ -292,7 +291,7 @@ func (h *windowsAnsiEventHandler) CUF(param int) error { return h.moveCursorHorizontal(param) } -func (h *windowsAnsiEventHandler) CUB(param int) error { +func (h *WindowsAnsiEventHandler) CUB(param int) error { if err := h.Flush(); err != nil { return err } @@ -301,7 +300,7 @@ func (h *windowsAnsiEventHandler) CUB(param int) error { return h.moveCursorHorizontal(-param) } -func (h *windowsAnsiEventHandler) CNL(param int) error { +func (h *WindowsAnsiEventHandler) CNL(param int) error { if err := h.Flush(); err != nil { return err } @@ -310,7 +309,7 @@ func (h *windowsAnsiEventHandler) CNL(param int) error { return h.moveCursorLine(param) } -func (h *windowsAnsiEventHandler) CPL(param int) error { +func (h *WindowsAnsiEventHandler) CPL(param int) error { if err := h.Flush(); err != nil { return err } @@ -319,7 +318,7 @@ func (h *windowsAnsiEventHandler) CPL(param int) error { return h.moveCursorLine(-param) } -func (h *windowsAnsiEventHandler) CHA(param int) error { +func (h *WindowsAnsiEventHandler) CHA(param int) error { if err := h.Flush(); err != nil { return err } @@ -328,7 +327,7 @@ func (h *windowsAnsiEventHandler) CHA(param int) error { return h.moveCursorColumn(param) } -func (h *windowsAnsiEventHandler) VPA(param int) error { +func (h *WindowsAnsiEventHandler) VPA(param int) error { if err := h.Flush(); err != nil { return err } @@ -340,11 +339,11 @@ func (h *windowsAnsiEventHandler) VPA(param int) error { } window := h.getCursorWindow(info) position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 + position.Y = window.Top + SHORT(param) - 1 return h.setCursorPosition(position, window) } -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { +func (h *WindowsAnsiEventHandler) CUP(row int, col int) error { if err := h.Flush(); err != nil { return err } @@ -356,11 +355,11 @@ func (h *windowsAnsiEventHandler) CUP(row int, col int) error { } window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + position := COORD{window.Left + SHORT(col) - 1, window.Top + SHORT(row) - 1} return h.setCursorPosition(position, window) } -func (h 
*windowsAnsiEventHandler) HVP(row int, col int) error { +func (h *WindowsAnsiEventHandler) HVP(row int, col int) error { if err := h.Flush(); err != nil { return err } @@ -369,7 +368,7 @@ func (h *windowsAnsiEventHandler) HVP(row int, col int) error { return h.CUP(row, col) } -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { +func (h *WindowsAnsiEventHandler) DECTCEM(visible bool) error { if err := h.Flush(); err != nil { return err } @@ -378,7 +377,7 @@ func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { return nil } -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { +func (h *WindowsAnsiEventHandler) DECOM(enable bool) error { if err := h.Flush(); err != nil { return err } @@ -388,7 +387,7 @@ func (h *windowsAnsiEventHandler) DECOM(enable bool) error { return h.CUP(1, 1) } -func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { +func (h *WindowsAnsiEventHandler) DECCOLM(use132 bool) error { if err := h.Flush(); err != nil { return err } @@ -401,7 +400,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { if err != nil { return err } - targetWidth := int16(80) + targetWidth := SHORT(80) if use132 { targetWidth = 132 } @@ -427,7 +426,7 @@ func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { return SetConsoleCursorPosition(h.fd, COORD{0, 0}) } -func (h *windowsAnsiEventHandler) ED(param int) error { +func (h *WindowsAnsiEventHandler) ED(param int) error { if err := h.Flush(); err != nil { return err } @@ -486,7 +485,7 @@ func (h *windowsAnsiEventHandler) ED(param int) error { return nil } -func (h *windowsAnsiEventHandler) EL(param int) error { +func (h *WindowsAnsiEventHandler) EL(param int) error { if err := h.Flush(); err != nil { return err } @@ -527,7 +526,7 @@ func (h *windowsAnsiEventHandler) EL(param int) error { return nil } -func (h *windowsAnsiEventHandler) IL(param int) error { +func (h *WindowsAnsiEventHandler) IL(param int) error { if err := h.Flush(); err != nil { return err } @@ -536,7 +535,7 @@ func (h *windowsAnsiEventHandler) IL(param int) error { return h.insertLines(param) } -func (h *windowsAnsiEventHandler) DL(param int) error { +func (h *WindowsAnsiEventHandler) DL(param int) error { if err := h.Flush(); err != nil { return err } @@ -545,7 +544,7 @@ func (h *windowsAnsiEventHandler) DL(param int) error { return h.deleteLines(param) } -func (h *windowsAnsiEventHandler) ICH(param int) error { +func (h *WindowsAnsiEventHandler) ICH(param int) error { if err := h.Flush(); err != nil { return err } @@ -554,7 +553,7 @@ func (h *windowsAnsiEventHandler) ICH(param int) error { return h.insertCharacters(param) } -func (h *windowsAnsiEventHandler) DCH(param int) error { +func (h *WindowsAnsiEventHandler) DCH(param int) error { if err := h.Flush(); err != nil { return err } @@ -563,7 +562,7 @@ func (h *windowsAnsiEventHandler) DCH(param int) error { return h.deleteCharacters(param) } -func (h *windowsAnsiEventHandler) SGR(params []int) error { +func (h *WindowsAnsiEventHandler) SGR(params []int) error { if err := h.Flush(); err != nil { return err } @@ -580,13 +579,13 @@ func (h *windowsAnsiEventHandler) SGR(params []int) error { } else { for _, attr := range params { - if attr == ansiterm.ANSI_SGR_RESET { + if attr == ANSI_SGR_RESET { h.attributes = h.infoReset.Attributes h.inverted = false continue } - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, 
h.inverted, h.infoReset.Attributes, SHORT(attr)) } } @@ -602,7 +601,7 @@ func (h *windowsAnsiEventHandler) SGR(params []int) error { return nil } -func (h *windowsAnsiEventHandler) SU(param int) error { +func (h *WindowsAnsiEventHandler) SU(param int) error { if err := h.Flush(); err != nil { return err } @@ -611,7 +610,7 @@ func (h *windowsAnsiEventHandler) SU(param int) error { return h.scrollUp(param) } -func (h *windowsAnsiEventHandler) SD(param int) error { +func (h *WindowsAnsiEventHandler) SD(param int) error { if err := h.Flush(); err != nil { return err } @@ -620,29 +619,29 @@ func (h *windowsAnsiEventHandler) SD(param int) error { return h.scrollDown(param) } -func (h *windowsAnsiEventHandler) DA(params []string) error { +func (h *WindowsAnsiEventHandler) DA(params []string) error { logger.Infof("DA: [%v]", params) // DA cannot be implemented because it must send data on the VT100 input stream, // which is not available to go-ansiterm. return nil } -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { +func (h *WindowsAnsiEventHandler) DECSTBM(top int, bottom int) error { if err := h.Flush(); err != nil { return err } logger.Infof("DECSTBM: [%d, %d]", top, bottom) // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) + h.sr.top = SHORT(top - 1) + h.sr.bottom = SHORT(bottom - 1) // This command also moves the cursor to the origin. h.clearWrap() return h.CUP(1, 1) } -func (h *windowsAnsiEventHandler) RI() error { +func (h *WindowsAnsiEventHandler) RI() error { if err := h.Flush(); err != nil { return err } @@ -657,17 +656,17 @@ func (h *windowsAnsiEventHandler) RI() error { sr := h.effectiveSr(info.Window) if info.CursorPosition.Y == sr.top { return h.scrollDown(1) + } else { + return h.moveCursorVertical(-1) } - - return h.moveCursorVertical(-1) } -func (h *windowsAnsiEventHandler) IND() error { +func (h *WindowsAnsiEventHandler) IND() error { logger.Info("IND: []") return h.executeLF() } -func (h *windowsAnsiEventHandler) Flush() error { +func (h *WindowsAnsiEventHandler) Flush() error { h.curInfo = nil if h.buffer.Len() > 0 { logger.Infof("Flush: [%s]", h.buffer.Bytes()) @@ -684,7 +683,7 @@ func (h *windowsAnsiEventHandler) Flush() error { return err } - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} + charInfo := []CHAR_INFO{{UnicodeChar: WCHAR(h.marginByte), Attributes: info.Attributes}} size := COORD{1, 1} position := COORD{0, 0} region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} @@ -698,7 +697,7 @@ func (h *windowsAnsiEventHandler) Flush() error { // cacheConsoleInfo ensures that the current console screen information has been queried // since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. 
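getCurrentInfo, defined just below, memoizes the console query in h.curInfo, and Flush resets it, so at most one GetConsoleScreenBufferInfo call happens per flush cycle. A sketch of that cache-until-flush idiom, with a counter standing in for the real Windows query:

```go
// Sketch of the cache-until-flush idiom behind getCurrentInfo/Flush.
package main

import "fmt"

type info struct{ x, y int }

type handler struct {
	curInfo *info
	fetches int
}

func (h *handler) getCurrentInfo() *info {
	if h.curInfo == nil {
		h.fetches++                        // stand-in for the real query
		h.curInfo = &info{x: 0, y: h.fetches}
	}
	return h.curInfo
}

func (h *handler) Flush() {
	h.curInfo = nil // next access re-queries
}

func main() {
	h := &handler{}
	h.getCurrentInfo()
	h.getCurrentInfo() // served from cache, no second fetch
	h.Flush()
	h.getCurrentInfo() // re-fetched after flush
	fmt.Println("fetches:", h.fetches) // 2
}
```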
-func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { +func (h *WindowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { if h.curInfo == nil { info, err := GetConsoleScreenBufferInfo(h.fd) if err != nil { @@ -710,7 +709,7 @@ func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFE return h.curPos, h.curInfo, nil } -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { +func (h *WindowsAnsiEventHandler) updatePos(pos COORD) { if h.curInfo == nil { panic("failed to call getCurrentInfo before calling updatePos") } @@ -720,7 +719,7 @@ func (h *windowsAnsiEventHandler) updatePos(pos COORD) { // clearWrap clears the state where the cursor is in the margin // waiting for the next character before wrapping the line. This must // be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { +func (h *WindowsAnsiEventHandler) clearWrap() { h.wrapNext = false h.drewMarginByte = false }
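Taken together, the recurring change across these hunks is the introduction of SHORT, WORD, DWORD, WCHAR, and BOOL as named Go types matching the Windows API widths, so structs such as COORD keep exactly the layout the syscalls expect. A sketch of why that matters for coordToPointer's packing trick (this assumes the two SHORTs are laid out contiguously, the same assumption the vendored comment states):

```go
// Sketch of the width-matched type aliases and the COORD packing trick.
package main

import (
	"fmt"
	"unsafe"
)

type (
	SHORT int16
	WORD  uint16
	DWORD uint32
)

type COORD struct {
	X SHORT
	Y SHORT
}

func main() {
	c := COORD{X: 3, Y: 4}
	// Two SHORTs pack into one DWORD-sized value, which is how
	// coordToPointer smuggles a COORD through a uintptr argument.
	packed := *(*DWORD)(unsafe.Pointer(&c))
	fmt.Printf("COORD size: %d bytes, packed: %#08x\n", unsafe.Sizeof(c), packed)
}
```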