Merge branch 'master' of github.com:kubernetes/kubernetes
9 Godeps/Godeps.json generated
@@ -38,6 +38,11 @@
       "Comment": "v10.0.4-beta-1-g786cc84",
       "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
     },
+    {
+      "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk",
+      "Comment": "v10.0.4-beta-1-g786cc84",
+      "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
+    },
     {
       "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network",
       "Comment": "v10.0.4-beta-1-g786cc84",
@@ -55,11 +60,11 @@
     },
     {
       "ImportPath": "github.com/Azure/go-ansiterm",
-      "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e"
+      "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
     },
     {
       "ImportPath": "github.com/Azure/go-ansiterm/winterm",
-      "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e"
+      "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
     },
     {
       "ImportPath": "github.com/Azure/go-autorest/autorest",
210 Godeps/LICENSES generated
@@ -8952,6 +8952,216 @@ SOFTWARE.
================================================================================


================================================================================
= vendor/github.com/Azure/azure-sdk-for-go/arm/disk licensed under: =

[full text of the Apache License, Version 2.0 (January 2004), ending with the
notice "Copyright 2016 Microsoft Corporation", omitted here]

= vendor/github.com/Azure/azure-sdk-for-go/LICENSE cce6fd055830ca30ff78fdf077e870d6 -
================================================================================


================================================================================
= vendor/github.com/Azure/azure-sdk-for-go/arm/network licensed under: =
@@ -17,7 +17,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -523,6 +523,20 @@ bazel-test:
 	bazel test --test_tag_filters=-integration --flaky_test_attempts=3 //cmd/... //pkg/... //federation/... //plugin/... //third_party/... //hack/... //hack:verify-all //vendor/k8s.io/...
 endif

+ifeq ($(PRINT_HELP),y)
+define BAZEL_TEST_INTEGRATION_HELP_INFO
+# Integration test with bazel
+#
+# Example:
+# make bazel-test-integration
+endef
+bazel-test-integration:
+	@echo "$$BAZEL_TEST_INTEGRATION_HELP_INFO"
+else
+bazel-test-integration:
+	bazel test //test/integration/...
+endif
+
 ifeq ($(PRINT_HELP),y)
 define BAZEL_BUILD_HELP_INFO
 # Build release tars with bazel
@@ -12,6 +12,16 @@ http_archive(
     urls = ["https://github.com/kubernetes/repo-infra/archive/9dedd5f4093884c133ad5ea73695b28338b954ab.tar.gz"],
 )

+ETCD_VERSION = "3.0.17"
+
+new_http_archive(
+    name = "com_coreos_etcd",
+    build_file = "third_party/etcd.BUILD",
+    sha256 = "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b",
+    strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION,
+    urls = ["https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)],
+)
+
 # This contains a patch to not prepend ./ to tarfiles produced by pkg_tar.
 # When merged upstream, we'll no longer need to use ixdy's fork:
 # https://bazel-review.googlesource.com/#/c/10390/
@@ -35,4 +35,3 @@ reviewers:
 - k82cn
 - caseydavenport
 - johscheuer
 - rjnagal
@@ -5,7 +5,6 @@ approvers:
 reviewers:
 - '249043822'
 - a-robinson
-- bprashanth
 - brendandburns
 - caesarxuchao
 - cjcullen
@@ -87,6 +87,12 @@ func NewCmdInit(out io.Writer) *cobra.Command {
 			i, err := NewInit(cfgPath, internalcfg, skipPreFlight, skipTokenPrint)
 			kubeadmutil.CheckErr(err)
 			kubeadmutil.CheckErr(i.Validate(cmd))
+
+			// TODO: remove this warning in 1.9
+			if !cmd.Flags().Lookup("token-ttl").Changed {
+				fmt.Println("[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)")
+			}
+
 			kubeadmutil.CheckErr(i.Run(out))
 		},
 	}
@@ -109,6 +109,12 @@ func NewCmdToken(out io.Writer, errW io.Writer) *cobra.Command {
 			client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile)
 			kubeadmutil.CheckErr(err)

+			// TODO: remove this warning in 1.9
+			if !tokenCmd.Flags().Lookup("ttl").Changed {
+				// sending this output to stderr
+				fmt.Fprintln(errW, "[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --ttl 0)")
+			}
+
 			err = RunCreateToken(out, client, token, tokenDuration, usages, description)
 			kubeadmutil.CheckErr(err)
 		},
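A minimal, self-contained sketch of the flag-inspection pattern used in both kubeadm hunks above, assuming only the cobra/pflag packages; the command and flag here are illustrative, not kubeadm's:

	package main

	import (
		"fmt"
		"time"

		"github.com/spf13/cobra"
	)

	func main() {
		var ttl time.Duration
		cmd := &cobra.Command{
			Use: "demo",
			Run: func(cmd *cobra.Command, args []string) {
				// Lookup(...).Changed is true only when the user passed the
				// flag explicitly; this is how the code above decides whether
				// to print the new-default warning.
				if !cmd.Flags().Lookup("ttl").Changed {
					fmt.Println("WARNING: tokens now expire after 24 hours by default")
				}
			},
		}
		cmd.Flags().DurationVar(&ttl, "ttl", 24*time.Hour, "token time-to-live; 0 means never expire")
		cmd.Execute()
	}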
@@ -84,8 +84,8 @@ const (
 	MinimumAddressesInServiceSubnet = 10

 	// DefaultTokenDuration specifies the default amount of time that a bootstrap token will be valid
-	// Default behaviour is "never expire" == 0
-	DefaultTokenDuration = 0
+	// Default behaviour is 24 hours
+	DefaultTokenDuration = 24 * time.Hour

 	// LabelNodeRoleMaster specifies that a node is a master
 	// It's copied over to kubeadm until it's merged in core: https://github.com/kubernetes/kubernetes/pull/39112
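A back-of-the-envelope illustration of what the constant change above means for token expiry, assuming a zero duration is still interpreted as "never expire" when passed explicitly (the helper below is hypothetical, not kubeadm code):

	package main

	import (
		"fmt"
		"time"
	)

	const defaultTokenDuration = 24 * time.Hour // the new kubeadm default

	// expiry returns when a token created at t stops working; a zero TTL
	// keeps the pre-1.8 never-expire behaviour.
	func expiry(t time.Time, ttl time.Duration) string {
		if ttl == 0 {
			return "never"
		}
		return t.Add(ttl).Format(time.RFC3339)
	}

	func main() {
		now := time.Now()
		fmt.Println(expiry(now, defaultTokenDuration)) // 24 hours from now
		fmt.Println(expiry(now, 0))                    // "never"
	}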
@@ -31,7 +31,8 @@ roleRef:
   kind: ClusterRole
   name: restricted-psp-user
 ---
-# edit grants edit role to system:authenticated.
+# edit grants edit role to the groups
+# restricted and privileged.
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
@@ -48,7 +48,7 @@ domount() {
 	SHARE=$(echo $2 | jq -r '.share')

 	if [ $(ismounted) -eq 1 ] ; then
-		log "{\"status\": \"Success\"}"
+		log '{"status": "Success"}'
 		exit 0
 	fi

@@ -59,14 +59,14 @@ domount() {
 		err "{ \"status\": \"Failure\", \"message\": \"Failed to mount ${NFS_SERVER}:${SHARE} at ${MNTPATH}\"}"
 		exit 1
 	fi
-	log "{\"status\": \"Success\"}"
+	log '{"status": "Success"}'
 	exit 0
 }

 unmount() {
 	MNTPATH=$1
 	if [ $(ismounted) -eq 0 ] ; then
-		log "{\"status\": \"Success\"}"
+		log '{"status": "Success"}'
 		exit 0
 	fi

@@ -76,14 +76,19 @@ unmount() {
 		exit 1
 	fi

-	log "{\"status\": \"Success\"}"
+	log '{"status": "Success"}'
 	exit 0
 }

 op=$1

+if ! command -v jq >/dev/null 2>&1; then
+	err "{ \"status\": \"Failure\", \"message\": \"'jq' binary not found. Please install jq package before using this driver\"}"
+	exit 1
+fi
+
 if [ "$op" = "init" ]; then
-	log "{\"status\": \"Success\", \"capabilities\": {\"attach\": false}}"
+	log '{"status": "Success", "capabilities": {"attach": false}}'
 	exit 0
 fi

@@ -101,7 +106,7 @@ case "$op" in
 		unmount $*
 		;;
 	*)
-		log "{ \"status\": \"Not supported\" }"
+		log '{"status": "Not supported"}'
 		exit 0
 esac
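The quoting change above swaps hand-escaped double quotes for single-quoted JSON literals. For a driver written in Go rather than shell, the same status objects can be produced without any manual escaping; a minimal sketch (the struct shape mirrors the strings in the script, not a published flexvolume API):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type driverStatus struct {
		Status       string          `json:"status"`
		Message      string          `json:"message,omitempty"`
		Capabilities map[string]bool `json:"capabilities,omitempty"`
	}

	func main() {
		b, err := json.Marshal(driverStatus{
			Status:       "Success",
			Capabilities: map[string]bool{"attach": false},
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b)) // {"status":"Success","capabilities":{"attach":false}}
	}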
@@ -205,6 +205,7 @@ pkg/controller/volume/attachdetach/util
 pkg/conversion
 pkg/conversion/queryparams
 pkg/credentialprovider/aws
+pkg/credentialprovider/azure
 pkg/fieldpath
 pkg/fields
 pkg/hyperkube
@@ -24,3 +24,4 @@ valid_flag
 retry_time
 file_content_in_loop
 break_on_expected_content
+Premium_LRS
@@ -17,7 +17,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -8,7 +8,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - erictune
 - tallclair
 - eparis
@@ -11,7 +11,6 @@ reviewers:
 - vishh
 - mikedanese
 - nikhiljindal
-- bprashanth
 - erictune
 - pmorie
 - dchen1107
@@ -12,7 +12,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -12,7 +12,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -37,4 +36,3 @@ reviewers:
 - krousey
 - rootfs
 - markturansky
 - vmarmol
@@ -16,7 +16,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - erictune
 - pmorie
@@ -4,7 +4,6 @@ reviewers:
 - smarterclayton
 - deads2k
 - caesarxuchao
-- bprashanth
 - pmorie
 - sttts
 - saad-ali
@@ -11,7 +11,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - sttts
 - dchen1107
@@ -10,7 +10,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - erictune
 - pmorie
 - sttts
@@ -18,7 +18,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -12,6 +12,7 @@ go_library(
     srcs = [
         "clientset.go",
         "doc.go",
+        "import.go",
         "import_known_versions.go",
     ],
     tags = ["automanaged"],
19 pkg/client/clientset_generated/clientset/import.go Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file exists to enforce this clientset's vanity import path.

package clientset // import "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
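The import comment added above makes the Go compiler reject any other path that resolves to the same directory (for example through a second GOPATH entry or a vendored copy). A minimal consumer, importing through the canonical path only:

	package main

	// Importing via the canonical path in the import comment compiles;
	// importing the same directory under any other path is a compile error
	// ("code in directory ... expects import ...").
	import _ "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"

	func main() {}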
@@ -10,7 +10,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
-- bprashanth
 - ixdy
 - gmarek
 - erictune
@@ -12,7 +12,6 @@ reviewers:
 - vishh
 - mikedanese
 - liggitt
-- bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -34,7 +33,6 @@ reviewers:
 - rootfs
 - jszczepkowski
 - markturansky
 - vmarmol
 - girishkalele
 - satnam6502
 - jdef
@@ -13,18 +13,19 @@ go_library(
     srcs = [
        "azure.go",
        "azure_backoff.go",
-       "azure_blob.go",
+       "azure_blobDiskController.go",
+       "azure_controllerCommon.go",
        "azure_file.go",
        "azure_instance_metadata.go",
        "azure_instances.go",
        "azure_loadbalancer.go",
+       "azure_managedDiskController.go",
        "azure_routes.go",
        "azure_storage.go",
        "azure_storageaccount.go",
        "azure_util.go",
        "azure_wrap.go",
        "azure_zones.go",
        "vhd.go",
    ],
    tags = ["automanaged"],
    deps = [
@@ -34,6 +35,7 @@ go_library(
        "//pkg/version:go_default_library",
        "//pkg/volume:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
+       "//vendor/github.com/Azure/azure-sdk-for-go/arm/disk:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/network:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
        "//vendor/github.com/Azure/azure-sdk-for-go/storage:go_default_library",
@@ -30,6 +30,7 @@ import (
 	"k8s.io/kubernetes/pkg/version"

 	"github.com/Azure/azure-sdk-for-go/arm/compute"
+	"github.com/Azure/azure-sdk-for-go/arm/disk"
 	"github.com/Azure/azure-sdk-for-go/arm/network"
 	"github.com/Azure/azure-sdk-for-go/arm/storage"
 	"github.com/Azure/go-autorest/autorest"
@@ -107,6 +108,9 @@ type Config struct {

 	// Use instance metadata service where possible
 	UseInstanceMetadata bool `json:"useInstanceMetadata" yaml:"useInstanceMetadata"`
+
+	// Use managed service identity for the virtual machine to access Azure ARM APIs
+	UseManagedIdentityExtension bool `json:"useManagedIdentityExtension"`
 }

 // Cloud holds the config and clients
@@ -122,8 +126,13 @@ type Cloud struct {
 	SecurityGroupsClient     network.SecurityGroupsClient
 	VirtualMachinesClient    compute.VirtualMachinesClient
 	StorageAccountClient     storage.AccountsClient
+	DisksClient              disk.DisksClient
 	operationPollRateLimiter flowcontrol.RateLimiter
 	resourceRequestBackoff   wait.Backoff
+
+	*BlobDiskController
+	*ManagedDiskController
+	*controllerCommon
 }

 func init() {
@@ -145,62 +154,62 @@ func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
 	return certificate, rsaPrivateKey, nil
 }

-// newServicePrincipalToken creates a new service principal token based on the configuration
-func newServicePrincipalToken(az *Cloud) (*adal.ServicePrincipalToken, error) {
-	oauthConfig, err := adal.NewOAuthConfig(az.Environment.ActiveDirectoryEndpoint, az.TenantID)
+// GetServicePrincipalToken creates a new service principal token based on the configuration
+func GetServicePrincipalToken(config *Config, env *azure.Environment) (*adal.ServicePrincipalToken, error) {
+	oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, config.TenantID)
 	if err != nil {
 		return nil, fmt.Errorf("creating the OAuth config: %v", err)
 	}

-	if len(az.AADClientSecret) > 0 {
+	if config.UseManagedIdentityExtension {
+		glog.V(2).Infoln("azure: using managed identity extension to retrieve access token")
+		return adal.NewServicePrincipalTokenFromMSI(
+			*oauthConfig,
+			env.ServiceManagementEndpoint)
+	}
+
+	if len(config.AADClientSecret) > 0 {
+		glog.V(2).Infoln("azure: using client_id+client_secret to retrieve access token")
 		return adal.NewServicePrincipalToken(
 			*oauthConfig,
-			az.AADClientID,
-			az.AADClientSecret,
-			az.Environment.ServiceManagementEndpoint)
-	} else if len(az.AADClientCertPath) > 0 && len(az.AADClientCertPassword) > 0 {
-		certData, err := ioutil.ReadFile(az.AADClientCertPath)
-		if err != nil {
-			return nil, fmt.Errorf("reading the client certificate from file %s: %v", az.AADClientCertPath, err)
-		}
-		certificate, privateKey, err := decodePkcs12(certData, az.AADClientCertPassword)
+			config.AADClientID,
+			config.AADClientSecret,
+			env.ServiceManagementEndpoint)
+	}
+
+	if len(config.AADClientCertPath) > 0 && len(config.AADClientCertPassword) > 0 {
+		glog.V(2).Infoln("azure: using jwt client_assertion (client_cert+client_private_key) to retrieve access token")
+		certData, err := ioutil.ReadFile(config.AADClientCertPath)
+		if err != nil {
+			return nil, fmt.Errorf("reading the client certificate from file %s: %v", config.AADClientCertPath, err)
+		}
+		certificate, privateKey, err := decodePkcs12(certData, config.AADClientCertPassword)
 		if err != nil {
 			return nil, fmt.Errorf("decoding the client certificate: %v", err)
 		}
 		return adal.NewServicePrincipalTokenFromCertificate(
 			*oauthConfig,
-			az.AADClientID,
+			config.AADClientID,
 			certificate,
 			privateKey,
-			az.Environment.ServiceManagementEndpoint)
-	} else {
-		return nil, fmt.Errorf("No credentials provided for AAD application %s", az.AADClientID)
+			env.ServiceManagementEndpoint)
 	}
+
+	return nil, fmt.Errorf("No credentials provided for AAD application %s", config.AADClientID)
 }

 // NewCloud returns a Cloud with initialized clients
 func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
-	var az Cloud
-
-	configContents, err := ioutil.ReadAll(configReader)
+	config, env, err := ParseConfig(configReader)
 	if err != nil {
 		return nil, err
 	}
-	err = yaml.Unmarshal(configContents, &az)
-	if err != nil {
-		return nil, err
+	az := Cloud{
+		Config:      *config,
+		Environment: *env,
 	}

-	if az.Cloud == "" {
-		az.Environment = azure.PublicCloud
-	} else {
-		az.Environment, err = azure.EnvironmentFromName(az.Cloud)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	servicePrincipalToken, err := newServicePrincipalToken(&az)
+	servicePrincipalToken, err := GetServicePrincipalToken(config, env)
 	if err != nil {
 		return nil, err
 	}
@@ -255,6 +264,11 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {

 	az.StorageAccountClient = storage.NewAccountsClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
 	az.StorageAccountClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
+	configureUserAgent(&az.StorageAccountClient.Client)
+
+	az.DisksClient = disk.NewDisksClientWithBaseURI(az.Environment.ResourceManagerEndpoint, az.SubscriptionID)
+	az.DisksClient.Authorizer = autorest.NewBearerAuthorizer(servicePrincipalToken)
+	configureUserAgent(&az.DisksClient.Client)

 	// Conditionally configure rate limits
 	if az.CloudProviderRateLimit {
@@ -304,9 +318,37 @@ func NewCloud(configReader io.Reader) (cloudprovider.Interface, error) {
 			az.CloudProviderBackoffJitter)
 	}

+	if err := initDiskControllers(&az); err != nil {
+		return nil, err
+	}
 	return &az, nil
 }

+// ParseConfig returns a parsed configuration and azure.Environment for an Azure cloudprovider config file
+func ParseConfig(configReader io.Reader) (*Config, *azure.Environment, error) {
+	var config Config
+
+	configContents, err := ioutil.ReadAll(configReader)
+	if err != nil {
+		return nil, nil, err
+	}
+	err = yaml.Unmarshal(configContents, &config)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var env azure.Environment
+	if config.Cloud == "" {
+		env = azure.PublicCloud
+	} else {
+		env, err = azure.EnvironmentFromName(config.Cloud)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+	return &config, &env, nil
+}
+
 // Initialize passes a Kubernetes clientBuilder interface to the cloud provider
 func (az *Cloud) Initialize(clientBuilder controller.ControllerClientBuilder) {}
@@ -353,3 +395,42 @@ func configureUserAgent(client *autorest.Client) {
 	k8sVersion := version.Get().GitVersion
 	client.UserAgent = fmt.Sprintf("%s; kubernetes-cloudprovider/%s", client.UserAgent, k8sVersion)
 }
+
+func initDiskControllers(az *Cloud) error {
+	// Common controller contains the function
+	// needed by both blob disk and managed disk controllers
+
+	common := &controllerCommon{
+		aadResourceEndPoint:   az.Environment.ServiceManagementEndpoint,
+		clientID:              az.AADClientID,
+		clientSecret:          az.AADClientSecret,
+		location:              az.Location,
+		storageEndpointSuffix: az.Environment.StorageEndpointSuffix,
+		managementEndpoint:    az.Environment.ResourceManagerEndpoint,
+		resourceGroup:         az.ResourceGroup,
+		tenantID:              az.TenantID,
+		tokenEndPoint:         az.Environment.ActiveDirectoryEndpoint,
+		subscriptionID:        az.SubscriptionID,
+		cloud:                 az,
+	}
+
+	// BlobDiskController: contains the function needed to
+	// create/attach/detach/delete blob based (unmanaged disks)
+	blobController, err := newBlobDiskController(common)
+	if err != nil {
+		return fmt.Errorf("AzureDisk - failed to init Blob Disk Controller with error (%s)", err.Error())
+	}
+
+	// ManagedDiskController: contains the functions needed to
+	// create/attach/detach/delete managed disks
+	managedController, err := newManagedDiskController(common)
+	if err != nil {
+		return fmt.Errorf("AzureDisk - failed to init Managed Disk Controller with error (%s)", err.Error())
+	}
+
+	az.BlobDiskController = blobController
+	az.ManagedDiskController = managedController
+	az.controllerCommon = common
+
+	return nil
+}
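Both ParseConfig and GetServicePrincipalToken are exported by this change, so the config parsing and token setup can now be reused outside NewCloud. A minimal sketch of calling them together, assuming the package is importable and with a hypothetical config file path:

	package main

	import (
		"os"

		"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
	)

	func main() {
		f, err := os.Open("/etc/kubernetes/azure.json") // hypothetical path
		if err != nil {
			panic(err)
		}
		defer f.Close()

		// ParseConfig resolves the cloud environment (public cloud by default).
		config, env, err := azure.ParseConfig(f)
		if err != nil {
			panic(err)
		}

		// With UseManagedIdentityExtension set in the config, this now uses
		// MSI instead of a client secret or client certificate.
		token, err := azure.GetServicePrincipalToken(config, env)
		if err != nil {
			panic(err)
		}
		_ = token
	}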
@@ -1,126 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"regexp"
	"strings"

	"bytes"

	azs "github.com/Azure/azure-sdk-for-go/storage"
)

const (
	vhdContainerName = "vhds"
	useHTTPS         = true
	blobServiceName  = "blob"
)

// create page blob
func (az *Cloud) createVhdBlob(accountName, accountKey, name string, sizeGB int64, tags map[string]string) (string, string, error) {
	blobClient, err := az.getBlobClient(accountName, accountKey)
	if err != nil {
		return "", "", err
	}
	size := 1024 * 1024 * 1024 * sizeGB
	vhdSize := size + vhdHeaderSize /* header size */
	// Blob name in URL must end with '.vhd' extension.
	name = name + ".vhd"
	cnt := blobClient.GetContainerReference(vhdContainerName)
	b := cnt.GetBlobReference(name)
	b.Properties.ContentLength = vhdSize
	b.Metadata = tags
	err = b.PutPageBlob(nil)
	if err != nil {
		// if container doesn't exist, create one and retry PutPageBlob
		detail := err.Error()
		if strings.Contains(detail, errContainerNotFound) {
			err = cnt.Create(&azs.CreateContainerOptions{Access: azs.ContainerAccessTypePrivate})
			if err == nil {
				b := cnt.GetBlobReference(name)
				b.Properties.ContentLength = vhdSize
				b.Metadata = tags
				err = b.PutPageBlob(nil)
			}
		}
	}
	if err != nil {
		return "", "", fmt.Errorf("failed to put page blob: %v", err)
	}

	// add VHD signature to the blob
	h, err := createVHDHeader(uint64(size))
	if err != nil {
		az.deleteVhdBlob(accountName, accountKey, name)
		return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
	}
	blobRange := azs.BlobRange{
		Start: uint64(size),
		End:   uint64(vhdSize - 1),
	}
	if err = b.WriteRange(blobRange, bytes.NewBuffer(h[:vhdHeaderSize]), nil); err != nil {
		az.deleteVhdBlob(accountName, accountKey, name)
		return "", "", fmt.Errorf("failed to update vhd header, err: %v", err)
	}

	scheme := "http"
	if useHTTPS {
		scheme = "https"
	}
	host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, az.Environment.StorageEndpointSuffix)
	uri := fmt.Sprintf("%s/%s/%s", host, vhdContainerName, name)
	return name, uri, nil

}

// delete a vhd blob
func (az *Cloud) deleteVhdBlob(accountName, accountKey, blobName string) error {
	blobClient, err := az.getBlobClient(accountName, accountKey)
	if err == nil {
		cnt := blobClient.GetContainerReference(vhdContainerName)
		b := cnt.GetBlobReference(blobName)
		return b.Delete(nil)
	}
	return err
}

func (az *Cloud) getBlobClient(accountName, accountKey string) (*azs.BlobStorageClient, error) {
	client, err := azs.NewClient(accountName, accountKey, az.Environment.StorageEndpointSuffix, azs.DefaultAPIVersion, useHTTPS)
	if err != nil {
		return nil, fmt.Errorf("error creating azure client: %v", err)
	}
	b := client.GetBlobService()
	return &b, nil
}

// get uri https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
func (az *Cloud) getBlobNameAndAccountFromURI(uri string) (string, string, error) {
	scheme := "http"
	if useHTTPS {
		scheme = "https"
	}
	host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, az.Environment.StorageEndpointSuffix)
	reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
	re := regexp.MustCompile(reStr)
	res := re.FindSubmatch([]byte(uri))
	if len(res) < 3 {
		return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, uri)
	}
	return string(res[1]), string(res[2]), nil
}
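The deleted createVhdBlob (and its replacement in the new blobDiskController below) sizes the page blob as the requested capacity plus a trailing VHD footer, then writes the footer as the blob's final range. A quick check of that arithmetic, assuming the 512-byte footer size used by the VHD format:

	package main

	import "fmt"

	const vhdFooterSize = 512 // assumed fixed VHD footer ("header") size

	func main() {
		var sizeGB int64 = 10
		size := sizeGB * 1024 * 1024 * 1024 // requested data bytes
		vhdSize := size + vhdFooterSize     // total page blob length
		// the footer occupies the range [size, vhdSize-1], matching the
		// BlobRange{Start: size, End: vhdSize - 1} written in the code
		fmt.Println(size, vhdSize, vhdSize-1)
	}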
808 pkg/cloudprovider/providers/azure/azure_blobDiskController.go Normal file
@@ -0,0 +1,808 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math"
	"net/url"
	"os"
	"regexp"
	"sync"

	"strconv"
	"strings"
	"sync/atomic"
	"time"

	storage "github.com/Azure/azure-sdk-for-go/arm/storage"
	azstorage "github.com/Azure/azure-sdk-for-go/storage"
	"github.com/Azure/go-autorest/autorest/to"
	"github.com/golang/glog"
	"github.com/rubiojr/go-vhd/vhd"
	kwait "k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/volume"
)

const (
	vhdContainerName         = "vhds"
	useHTTPSForBlobBasedDisk = true
	blobServiceName          = "blob"
)

type storageAccountState struct {
	name                    string
	saType                  storage.SkuName
	key                     string
	diskCount               int32
	isValidating            int32
	defaultContainerCreated bool
}

//BlobDiskController : blob disk controller struct
type BlobDiskController struct {
	common   *controllerCommon
	accounts map[string]*storageAccountState
}

var defaultContainerName = ""
var storageAccountNamePrefix = ""
var storageAccountNameMatch = ""
var initFlag int64

var accountsLock = &sync.Mutex{}

func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
	c := BlobDiskController{common: common}
	err := c.init()

	if err != nil {
		return nil, err
	}

	return &c, nil
}
// CreateVolume creates a VHD blob in a given storage account, will create the given storage account if it does not exist in current resource group
func (c *BlobDiskController) CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error) {
	key, err := c.common.cloud.getStorageAccesskey(storageAccount)
	if err != nil {
		glog.V(2).Infof("azureDisk - no key found for storage account %s in resource group %s, begin to create a new storage account", storageAccount, c.common.resourceGroup)

		cp := storage.AccountCreateParameters{
			Sku:      &storage.Sku{Name: storageAccountType},
			Tags:     &map[string]*string{"created-by": to.StringPtr("azure-dd")},
			Location: &location}
		cancel := make(chan struct{})

		_, errchan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccount, cp, cancel)
		err = <-errchan
		if err != nil {
			return "", "", 0, fmt.Errorf(fmt.Sprintf("Create Storage Account %s, error: %s", storageAccount, err))
		}

		key, err = c.common.cloud.getStorageAccesskey(storageAccount)
		if err != nil {
			return "", "", 0, fmt.Errorf("no key found for storage account %s even after creating a new storage account", storageAccount)
		}

		glog.Errorf("no key found for storage account %s in resource group %s", storageAccount, c.common.resourceGroup)
		return "", "", 0, err
	}

	client, err := azstorage.NewBasicClient(storageAccount, key)
	if err != nil {
		return "", "", 0, err
	}
	blobClient := client.GetBlobService()

	container := blobClient.GetContainerReference(vhdContainerName)
	_, err = container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
	if err != nil {
		return "", "", 0, err
	}

	diskName, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccount, name, vhdContainerName, int64(requestGB))
	if err != nil {
		return "", "", 0, err
	}

	glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
	return diskName, diskURI, requestGB, err
}
// DeleteVolume deletes a VHD blob
func (c *BlobDiskController) DeleteVolume(diskURI string) error {
	glog.V(4).Infof("azureDisk - begin to delete volume %s", diskURI)
	accountName, blob, err := c.common.cloud.getBlobNameAndAccountFromURI(diskURI)
	if err != nil {
		return fmt.Errorf("failed to parse vhd URI %v", err)
	}
	key, err := c.common.cloud.getStorageAccesskey(accountName)
	if err != nil {
		return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
	}
	err = c.common.cloud.deleteVhdBlob(accountName, key, blob)
	if err != nil {
		glog.Warningf("azureDisk - failed to delete blob %s err: %v", diskURI, err)
		detail := err.Error()
		if strings.Contains(detail, errLeaseIDMissing) {
			// disk is still being used
			// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
			return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", diskURI))
		}
		return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", diskURI, accountName, blob, err)
	}
	glog.V(4).Infof("azureDisk - blob %s deleted", diskURI)
	return nil

}

// get diskURI https://foo.blob.core.windows.net/vhds/bar.vhd and return foo (account) and bar.vhd (blob name)
func (c *BlobDiskController) getBlobNameAndAccountFromURI(diskURI string) (string, string, error) {
	scheme := "http"
	if useHTTPSForBlobBasedDisk {
		scheme = "https"
	}
	host := fmt.Sprintf("%s://(.*).%s.%s", scheme, blobServiceName, c.common.storageEndpointSuffix)
	reStr := fmt.Sprintf("%s/%s/(.*)", host, vhdContainerName)
	re := regexp.MustCompile(reStr)
	res := re.FindSubmatch([]byte(diskURI))
	if len(res) < 3 {
		return "", "", fmt.Errorf("invalid vhd URI for regex %s: %s", reStr, diskURI)
	}
	return string(res[1]), string(res[2]), nil
}
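// Editorial note, hedged: the function above parses URIs of the form
//   https://<account>.blob.<storageEndpointSuffix>/vhds/<name>.vhd
// For example, with storageEndpointSuffix "core.windows.net", the URI
// https://foo.blob.core.windows.net/vhds/bar.vhd yields account "foo"
// and blob name "bar.vhd"; len(res) < 3 means the URI did not match.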
func (c *BlobDiskController) createVHDBlobDisk(blobClient azstorage.BlobStorageClient, accountName, vhdName, containerName string, sizeGB int64) (string, string, error) {
	container := blobClient.GetContainerReference(containerName)
	_, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
	if err != nil {
		return "", "", err
	}

	size := 1024 * 1024 * 1024 * sizeGB
	vhdSize := size + vhd.VHD_HEADER_SIZE /* header size */
	// Blob name in URL must end with '.vhd' extension.
	vhdName = vhdName + ".vhd"

	tags := make(map[string]string)
	tags["createdby"] = "k8sAzureDataDisk"
	glog.V(4).Infof("azureDisk - creating page blob %s in container %s account %s", vhdName, containerName, accountName)

	blob := container.GetBlobReference(vhdName)
	blob.Properties.ContentLength = vhdSize
	blob.Metadata = tags
	err = blob.PutPageBlob(nil)
	if err != nil {
		return "", "", fmt.Errorf("failed to put page blob %s in container %s: %v", vhdName, containerName, err)
	}

	// add VHD signature to the blob
	h, err := createVHDHeader(uint64(size))
	if err != nil {
		blob.DeleteIfExists(nil)
		return "", "", fmt.Errorf("failed to create vhd header, err: %v", err)
	}

	blobRange := azstorage.BlobRange{
		Start: uint64(size),
		End:   uint64(vhdSize - 1),
	}
	if err = blob.WriteRange(blobRange, bytes.NewBuffer(h[:vhd.VHD_HEADER_SIZE]), nil); err != nil {
		glog.Infof("azureDisk - failed to put header page for data disk %s in container %s account %s, error was %s\n",
			vhdName, containerName, accountName, err.Error())
		return "", "", err
	}

	scheme := "http"
	if useHTTPSForBlobBasedDisk {
		scheme = "https"
	}

	host := fmt.Sprintf("%s://%s.%s.%s", scheme, accountName, blobServiceName, c.common.storageEndpointSuffix)
	uri := fmt.Sprintf("%s/%s/%s", host, containerName, vhdName)
	return vhdName, uri, nil
}
// delete a vhd blob
func (c *BlobDiskController) deleteVhdBlob(accountName, accountKey, blobName string) error {
	client, err := azstorage.NewBasicClient(accountName, accountKey)
	if err != nil {
		return err
	}
	blobSvc := client.GetBlobService()

	container := blobSvc.GetContainerReference(vhdContainerName)
	blob := container.GetBlobReference(blobName)
	return blob.Delete(nil)
}
//CreateBlobDisk : create a blob disk in a node
func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error) {
	glog.V(4).Infof("azureDisk - creating blob data disk named:%s on StorageAccountType:%s StandAlone:%v", dataDiskName, storageAccountType, forceStandAlone)

	var storageAccountName = ""
	var err error

	if forceStandAlone {
		// we have to wait until the storage account is created
		storageAccountName = "p" + MakeCRC32(c.common.subscriptionID+c.common.resourceGroup+dataDiskName)
		err = c.createStorageAccount(storageAccountName, storageAccountType, c.common.location, false)
		if err != nil {
			return "", err
		}
	} else {
		storageAccountName, err = c.findSANameForDisk(storageAccountType)
		if err != nil {
			return "", err
		}
	}

	blobClient, err := c.getBlobSvcClient(storageAccountName)
	if err != nil {
		return "", err
	}

	_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB))
	if err != nil {
		return "", err
	}

	if !forceStandAlone {
		atomic.AddInt32(&c.accounts[storageAccountName].diskCount, 1)
	}

	return diskURI, nil
}
//DeleteBlobDisk : delete a blob disk from a node
func (c *BlobDiskController) DeleteBlobDisk(diskURI string, wasForced bool) error {
	storageAccountName, vhdName, err := diskNameandSANameFromURI(diskURI)
	if err != nil {
		return err
	}

	_, ok := c.accounts[storageAccountName]
	if !ok {
		// the storage account is specified by user
		glog.V(4).Infof("azureDisk - deleting volume %s", diskURI)
		return c.DeleteVolume(diskURI)
	}
	// if forced (as in one disk = one storage account)
	// delete the account completely
	if wasForced {
		return c.deleteStorageAccount(storageAccountName)
	}

	blobSvc, err := c.getBlobSvcClient(storageAccountName)
	if err != nil {
		return err
	}

	glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName)

	container := blobSvc.GetContainerReference(defaultContainerName)
	blob := container.GetBlobReference(vhdName)
	_, err = blob.DeleteIfExists(nil)

	if c.accounts[storageAccountName].diskCount == -1 {
		if diskCount, err := c.getDiskCount(storageAccountName); err != nil {
			c.accounts[storageAccountName].diskCount = int32(diskCount)
		} else {
			glog.Warningf("azureDisk - failed to get disk count for %s however the delete disk operation was ok", storageAccountName)
			return nil // we have failed to acquire a new count. not an error condition
		}
	}
	atomic.AddInt32(&c.accounts[storageAccountName].diskCount, -1)
	return err
}
// Init tries best effort to ensure that 2 accounts standard/premium were created
// to be used by shared blob disks. This to increase the speed pvc provisioning (in most of cases)
func (c *BlobDiskController) init() error {
	if !c.shouldInit() {
		return nil
	}

	c.setUniqueStrings()

	// get accounts
	accounts, err := c.getAllStorageAccounts()
	if err != nil {
		return err
	}
	c.accounts = accounts

	if len(c.accounts) == 0 {
		counter := 1
		for counter <= storageAccountsCountInit {

			accountType := storage.PremiumLRS
			if n := math.Mod(float64(counter), 2); n == 0 {
				accountType = storage.StandardLRS
			}

			// We don't really care if these calls failed
			// at this stage, we are trying to ensure 2 accounts (Standard/Premium)
			// are there ready for PVC creation

			// if we failed here, the accounts will be created in the process
			// of creating PVC

			// nor do we care if they were partially created, as the entire
			// account creation process is idempotent
			go func(thisNext int) {
				newAccountName := getAccountNameForNum(thisNext)

				glog.Infof("azureDisk - BlobDiskController init process will create new storageAccount:%s type:%s", newAccountName, accountType)
				err := c.createStorageAccount(newAccountName, accountType, c.common.location, true)
				// TODO return created and error from
				if err != nil {
					glog.Infof("azureDisk - BlobDiskController init: create account %s with error:%s", newAccountName, err.Error())

				} else {
					glog.Infof("azureDisk - BlobDiskController init: created account %s", newAccountName)
				}
			}(counter)
			counter = counter + 1
		}
	}

	return nil
}
//Sets unique strings to be used as accountnames && || blob containers names
func (c *BlobDiskController) setUniqueStrings() {
	uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID
	hash := MakeCRC32(uniqueString)
	//used to generate a unique container name used by this cluster PVC
	defaultContainerName = hash

	storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash)
	// Used to filter relevant accounts (accounts used by shared PVC)
	storageAccountNameMatch = storageAccountNamePrefix
	// Used as a template to create new names for relevant accounts
	storageAccountNamePrefix = storageAccountNamePrefix + "%s"
}

func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
	if account, exists := c.accounts[SAName]; exists && account.key != "" {
		return c.accounts[SAName].key, nil
	}
	listKeysResult, err := c.common.cloud.StorageAccountClient.ListKeys(c.common.resourceGroup, SAName)
	if err != nil {
		return "", err
	}
	if listKeysResult.Keys == nil {
		return "", fmt.Errorf("azureDisk - empty listKeysResult in storage account:%s keys", SAName)
	}
	for _, v := range *listKeysResult.Keys {
		if v.Value != nil && *v.Value == "key1" {
			if _, ok := c.accounts[SAName]; !ok {
				glog.Warningf("azureDisk - account %s was not cached while getting keys", SAName)
				return *v.Value, nil
			}
		}

		c.accounts[SAName].key = *v.Value
		return c.accounts[SAName].key, nil
	}

	return "", fmt.Errorf("couldn't find key named key1 in storage account:%s keys", SAName)
}
func (c *BlobDiskController) getBlobSvcClient(SAName string) (azstorage.BlobStorageClient, error) {
	key := ""
	var client azstorage.Client
	var blobSvc azstorage.BlobStorageClient
	var err error
	if key, err = c.getStorageAccountKey(SAName); err != nil {
		return blobSvc, err
	}

	if client, err = azstorage.NewBasicClient(SAName, key); err != nil {
		return blobSvc, err
	}

	blobSvc = client.GetBlobService()
	return blobSvc, nil
}
func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) error {
|
||||
var err error
|
||||
var blobSvc azstorage.BlobStorageClient
|
||||
|
||||
// short circut the check via local cache
|
||||
// we are forgiving the fact that account may not be in cache yet
|
||||
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
|
||||
return nil
|
||||
}
|
||||
|
||||
// not cached, check existance and readiness
|
||||
bExist, provisionState, _ := c.getStorageAccountState(storageAccountName)
|
||||
|
||||
// account does not exist
|
||||
if !bExist {
|
||||
return fmt.Errorf("azureDisk - account %s does not exist while trying to create/ensure default container", storageAccountName)
|
||||
}
|
||||
|
||||
// account exists but not ready yet
|
||||
if provisionState != storage.Succeeded {
|
||||
// we don't want many attempts to validate the account readiness
|
||||
// here hence we are locking
|
||||
counter := 1
|
||||
for swapped := atomic.CompareAndSwapInt32(&c.accounts[storageAccountName].isValidating, 0, 1); swapped != true; {
|
||||
time.Sleep(3 * time.Second)
|
||||
counter = counter + 1
|
||||
// check if we passed the max sleep
|
||||
if counter >= 20 {
|
||||
return fmt.Errorf("azureDisk - timeout waiting to aquire lock to validate account:%s readiness", storageAccountName)
|
||||
}
|
||||
}
|
||||
|
||||
// swapped
|
||||
defer func() {
|
||||
c.accounts[storageAccountName].isValidating = 0
|
||||
}()
|
||||
|
||||
// short circut the check again.
|
||||
if v, ok := c.accounts[storageAccountName]; ok && v.defaultContainerCreated {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
|
||||
_, provisionState, err := c.getStorageAccountState(storageAccountName)
|
||||
|
||||
if err != nil {
|
||||
glog.V(4).Infof("azureDisk - GetStorageAccount:%s err %s", storageAccountName, err.Error())
|
||||
return false, err
|
||||
}
|
||||
|
||||
if provisionState == storage.Succeeded {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
glog.V(4).Infof("azureDisk - GetStorageAccount:%s not ready yet", storageAccountName)
|
||||
// leave it for next loop/sync loop
|
||||
return false, fmt.Errorf("azureDisk - Account %s has not been flagged Succeeded by ARM", storageAccountName)
|
||||
})
|
||||
// we have failed to ensure that account is ready for us to create
|
||||
// the default vhd container
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if blobSvc, err = c.getBlobSvcClient(storageAccountName); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
container := blobSvc.GetContainerReference(defaultContainerName)
|
||||
bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bCreated {
|
||||
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName)
|
||||
}
|
||||
|
||||
// flag so we no longer have to check on ARM
|
||||
c.accounts[storageAccountName].defaultContainerCreated = true
|
||||
return nil
|
||||
}
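
The isValidating field above doubles as a lightweight try-lock around the readiness probe. As a standalone sketch of the same sync/atomic CompareAndSwap pattern (the names below are illustrative, not part of this file):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var busy int32 // 0 = free, 1 = held

	// take the flag, retrying with a sleep, as ensureDefaultContainer does
	for !atomic.CompareAndSwapInt32(&busy, 0, 1) {
		time.Sleep(10 * time.Millisecond)
	}
	defer atomic.StoreInt32(&busy, 0) // release on exit

	fmt.Println("flag acquired")
}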

// getDiskCount gets disk counts per storage account
func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
	// if we have it in cache
	if c.accounts[SAName].diskCount != -1 {
		return int(c.accounts[SAName].diskCount), nil
	}

	var err error
	var blobSvc azstorage.BlobStorageClient

	if err = c.ensureDefaultContainer(SAName); err != nil {
		return 0, err
	}

	if blobSvc, err = c.getBlobSvcClient(SAName); err != nil {
		return 0, err
	}
	params := azstorage.ListBlobsParameters{}

	container := blobSvc.GetContainerReference(defaultContainerName)
	response, err := container.ListBlobs(params)
	if err != nil {
		return 0, err
	}
	glog.V(4).Infof("azureDisk - refreshed disk count for account %s and found %v", SAName, len(response.Blobs))
	c.accounts[SAName].diskCount = int32(len(response.Blobs))

	return int(c.accounts[SAName].diskCount), nil
}

// shouldInit ensures that we only init the plugin once
// and we only do that in the controller
func (c *BlobDiskController) shouldInit() bool {
	if os.Args[0] == "kube-controller-manager" || (os.Args[0] == "/hyperkube" && len(os.Args) > 1 && os.Args[1] == "controller-manager") {
		swapped := atomic.CompareAndSwapInt64(&initFlag, 0, 1)
		if swapped {
			return true
		}
	}
	return false
}

func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccountState, error) {
	accountListResult, err := c.common.cloud.StorageAccountClient.List()
	if err != nil {
		return nil, err
	}
	if accountListResult.Value == nil {
		return nil, fmt.Errorf("azureDisk - empty accountListResult")
	}

	accounts := make(map[string]*storageAccountState)
	for _, v := range *accountListResult.Value {
		// the nil check must come first, before *v.Name is dereferenced
		if v.Name == nil || v.Sku == nil {
			glog.Infof("azureDisk - accountListResult Name or Sku is nil")
			continue
		}
		if !strings.HasPrefix(*v.Name, storageAccountNameMatch) {
			continue
		}
		glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)

		sastate := &storageAccountState{
			name:      *v.Name,
			saType:    (*v.Sku).Name,
			diskCount: -1,
		}

		accounts[*v.Name] = sastate
	}

	return accounts, nil
}

func (c *BlobDiskController) createStorageAccount(storageAccountName string, storageAccountType storage.SkuName, location string, checkMaxAccounts bool) error {
	bExist, _, _ := c.getStorageAccountState(storageAccountName)
	if bExist {
		newAccountState := &storageAccountState{
			diskCount: -1,
			saType:    storageAccountType,
			name:      storageAccountName,
		}

		c.addAccountState(storageAccountName, newAccountState)
	}
	// account does not exist
	if !bExist {
		if len(c.accounts) == maxStorageAccounts && checkMaxAccounts {
			return fmt.Errorf("azureDisk - can not create new storage account, current storage accounts count:%v Max is:%v", len(c.accounts), maxStorageAccounts)
		}

		glog.V(2).Infof("azureDisk - Creating storage account %s type %s \n", storageAccountName, string(storageAccountType))

		cp := storage.AccountCreateParameters{
			Sku:      &storage.Sku{Name: storageAccountType},
			Tags:     &map[string]*string{"created-by": to.StringPtr("azure-dd")},
			Location: &location}
		cancel := make(chan struct{})

		_, errChan := c.common.cloud.StorageAccountClient.Create(c.common.resourceGroup, storageAccountName, cp, cancel)
		err := <-errChan
		if err != nil {
			return fmt.Errorf("Create Storage Account: %s, error: %v", storageAccountName, err)
		}

		newAccountState := &storageAccountState{
			diskCount: -1,
			saType:    storageAccountType,
			name:      storageAccountName,
		}

		c.addAccountState(storageAccountName, newAccountState)
	}

	if !bExist {
		// storage accounts take time to be provisioned,
		// so if this account was just created allow it some time
		// before polling
		glog.V(2).Infof("azureDisk - storage account %s was just created, allowing time before polling status", storageAccountName)
		time.Sleep(25 * time.Second) // as observed, 25s is the average time for an SA to be provisioned
	}

	// finally, make sure that the default container is created
	// before handing the account back over
	return c.ensureDefaultContainer(storageAccountName)
}

// findSANameForDisk finds a new suitable storageAccount for this disk
func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuName) (string, error) {
	maxDiskCount := maxDisksPerStorageAccounts
	SAName := ""
	totalDiskCounts := 0
	countAccounts := 0 // accounts of this type.
	for _, v := range c.accounts {
		// filter out any stand-alone disks/accounts
		if !strings.HasPrefix(v.name, storageAccountNameMatch) {
			continue
		}

		// note: we compute the average stratified by type.
		// this enables the user to grow per SA type and avoids low
		// average utilization on one account type skewing all the data.
		if v.saType == storageAccountType {
			// compute average
			dCount, err := c.getDiskCount(v.name)
			if err != nil {
				return "", err
			}
			totalDiskCounts = totalDiskCounts + dCount
			countAccounts = countAccounts + 1
			// empty account
			if dCount == 0 {
				glog.V(2).Infof("azureDisk - account %s identified for a new disk because it has 0 allocated disks", v.name)
				return v.name, nil // short-circuit; the average is good and there is no need to adjust
			}
			// if this account is less allocated
			if dCount < maxDiskCount {
				maxDiskCount = dCount
				SAName = v.name
			}
		}
	}

	// if we failed to find a storage account
	if SAName == "" {
		glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create a new account")
		SAName = getAccountNameForNum(c.getNextAccountNum())
		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
		if err != nil {
			return "", err
		}
		return SAName, nil
	}

	disksAfter := totalDiskCounts + 1 // with the new one!

	avgUtilization := float64(disksAfter) / float64(countAccounts*maxDisksPerStorageAccounts)
	aboveAvg := (avgUtilization > storageAccountUtilizationBeforeGrowing)

	// the average is not OK and we should create more accounts if we can
	if aboveAvg && countAccounts < maxStorageAccounts {
		glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
		SAName = getAccountNameForNum(c.getNextAccountNum())
		err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
		if err != nil {
			return "", err
		}
		return SAName, nil
	}

	// the average is not OK and we are at capacity (max storage accounts allowed)
	if aboveAvg && countAccounts == maxStorageAccounts {
		glog.Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). But k8s maxed on SAs for PVC(%v). k8s will now exceed grow-at-avg-utilization without adding accounts",
			avgUtilization, storageAccountUtilizationBeforeGrowing, maxStorageAccounts)
	}

	// we found a storage account && [ the average is OK || we reached the max SA count ]
	return SAName, nil
}
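
To make the growth rule concrete: with countAccounts = 2 and maxDisksPerStorageAccounts = 60, placing one more disk on top of 60 existing ones gives avgUtilization = 61/120 ≈ 0.51, which exceeds storageAccountUtilizationBeforeGrowing (0.5), so a third account is created as long as maxStorageAccounts has not been reached.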

func (c *BlobDiskController) getNextAccountNum() int {
	max := 0

	for k := range c.accounts {
		// filter out accounts that are for stand-alone disks
		if !strings.HasPrefix(k, storageAccountNameMatch) {
			continue
		}
		num := getAccountNumFromName(k)
		if num > max {
			max = num
		}
	}

	return max + 1
}

func (c *BlobDiskController) deleteStorageAccount(storageAccountName string) error {
	resp, err := c.common.cloud.StorageAccountClient.Delete(c.common.resourceGroup, storageAccountName)
	if err != nil {
		return fmt.Errorf("azureDisk - Delete of storage account '%s' failed with status %s...%v", storageAccountName, resp.Status, err)
	}

	c.removeAccountState(storageAccountName)

	glog.Infof("azureDisk - Storage Account %s was deleted", storageAccountName)
	return nil
}

// getStorageAccountState returns whether the storage account exists, its provisioning state, and an error if any
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
	account, err := c.common.cloud.StorageAccountClient.GetProperties(c.common.resourceGroup, storageAccountName)
	if err != nil {
		return false, "", err
	}
	return true, account.AccountProperties.ProvisioningState, nil
}

func (c *BlobDiskController) addAccountState(key string, state *storageAccountState) {
	accountsLock.Lock()
	defer accountsLock.Unlock()

	if _, ok := c.accounts[key]; !ok {
		c.accounts[key] = state
	}
}

func (c *BlobDiskController) removeAccountState(key string) {
	accountsLock.Lock()
	defer accountsLock.Unlock()
	delete(c.accounts, key)
}

// getAccountNameForNum pads the account num with zeros as needed
func getAccountNameForNum(num int) string {
	sNum := strconv.Itoa(num)
	missingZeros := 3 - len(sNum)
	strZero := ""
	for missingZeros > 0 {
		strZero = strZero + "0"
		missingZeros = missingZeros - 1
	}

	sNum = strZero + sNum
	return fmt.Sprintf(storageAccountNamePrefix, sNum)
}
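
The zero-padding loop above is equivalent to the standard %03d format verb. A minimal sketch (the "pvc" prefix is an assumption standing in for the controller's storageAccountNamePrefix):

package main

import "fmt"

func main() {
	num := 7
	// pad to three digits in one call, e.g. 7 -> "pvc007"
	name := fmt.Sprintf("pvc%03d", num)
	fmt.Println(name)
}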

func getAccountNumFromName(accountName string) int {
	nameLen := len(accountName)
	num, _ := strconv.Atoi(accountName[nameLen-3:])

	return num
}

func createVHDHeader(size uint64) ([]byte, error) {
	h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
	b := new(bytes.Buffer)
	err := binary.Write(b, binary.BigEndian, h)
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
}

func diskNameandSANameFromURI(diskURI string) (string, string, error) {
	uri, err := url.Parse(diskURI)
	if err != nil {
		return "", "", err
	}

	hostName := uri.Host
	storageAccountName := strings.Split(hostName, ".")[0]

	segments := strings.Split(uri.Path, "/")
	diskNameVhd := segments[len(segments)-1]

	return storageAccountName, diskNameVhd, nil
}
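
A quick usage sketch of the parsing done above, against a made-up VHD URI (the URI is hypothetical; only its host/path shape matters):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	diskURI := "https://pvc001.blob.core.windows.net/vhds/my-disk.vhd"

	uri, err := url.Parse(diskURI)
	if err != nil {
		panic(err)
	}
	account := strings.Split(uri.Host, ".")[0] // "pvc001"
	segments := strings.Split(uri.Path, "/")
	vhdName := segments[len(segments)-1] // "my-disk.vhd"
	fmt.Println(account, vhdName)
}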

270
pkg/cloudprovider/providers/azure/azure_controllerCommon.go
Normal file
@@ -0,0 +1,270 @@

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/types"
	kwait "k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/cloudprovider"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/golang/glog"
)

const (
	defaultDataDiskCount int = 16 // which will allow you to work with most medium-size VMs (if not found in the map)
	storageAccountNameTemplate   = "pvc%s"

	// for limits check https://docs.microsoft.com/en-us/azure/azure-subscription-service-limits#storage-limits
	maxStorageAccounts                     = 100 // max # is 200 (250 with special request). this allows 100 for everything else including stand-alone disks
	maxDisksPerStorageAccounts             = 60
	storageAccountUtilizationBeforeGrowing = 0.5
	storageAccountsCountInit               = 2 // when the plug-in is init-ed, 2 storage accounts will be created to allow fast pvc create/attach/mount

	maxLUN               = 64 // max number of LUNs per VM
	errLeaseFailed       = "AcquireDiskLeaseFailed"
	errLeaseIDMissing    = "LeaseIdMissing"
	errContainerNotFound = "ContainerNotFound"
)

var defaultBackOff = kwait.Backoff{
	Steps:    20,
	Duration: 2 * time.Second,
	Factor:   1.5,
	Jitter:   0.0,
}
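
defaultBackOff drives the polling loops above: up to 20 attempts, starting at a 2-second gap and growing by 1.5x each retry. A minimal standalone sketch of how kwait.ExponentialBackoff consumes such a config (the condition here is a stand-in, not a real readiness probe):

package main

import (
	"fmt"
	"time"

	kwait "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	backoff := kwait.Backoff{Steps: 5, Duration: time.Second, Factor: 1.5}

	attempts := 0
	err := kwait.ExponentialBackoff(backoff, func() (bool, error) {
		attempts++
		// true stops the loop; (false, nil) sleeps and retries;
		// a non-nil error aborts immediately
		return attempts >= 3, nil
	})
	fmt.Println(attempts, err)
}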

type controllerCommon struct {
	tenantID              string
	subscriptionID        string
	location              string
	storageEndpointSuffix string
	resourceGroup         string
	clientID              string
	clientSecret          string
	managementEndpoint    string
	tokenEndPoint         string
	aadResourceEndPoint   string
	aadToken              string
	expiresOn             time.Time
	cloud                 *Cloud
}

// AttachDisk attaches a vhd to the vm.
// the vhd must exist and can be identified by diskName, diskURI, and lun.
func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if err != nil {
		return err
	} else if !exists {
		return cloudprovider.InstanceNotFound
	}
	disks := *vm.StorageProfile.DataDisks
	if isManagedDisk {
		disks = append(disks,
			compute.DataDisk{
				Name:         &diskName,
				Lun:          &lun,
				Caching:      cachingMode,
				CreateOption: "attach",
				ManagedDisk: &compute.ManagedDiskParameters{
					ID: &diskURI,
				},
			})
	} else {
		disks = append(disks,
			compute.DataDisk{
				Name: &diskName,
				Vhd: &compute.VirtualHardDisk{
					URI: &diskURI,
				},
				Lun:          &lun,
				Caching:      cachingMode,
				CreateOption: "attach",
			})
	}

	newVM := compute.VirtualMachine{
		Location: vm.Location,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				DataDisks: &disks,
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk", c.resourceGroup, vmName)
	c.cloud.operationPollRateLimiter.Accept()
	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure attach failed, err: %v", err)
		detail := err.Error()
		if strings.Contains(detail, errLeaseFailed) {
			// if the lease cannot be acquired, immediately detach the disk and return the original error
			glog.Infof("azureDisk - failed to acquire disk lease, try detach")
			c.cloud.DetachDiskByName(diskName, diskURI, nodeName)
		}
	} else {
		glog.V(4).Infof("azureDisk - azure attach succeeded")
	}
	return err
}

// DetachDiskByName detaches a vhd from the host.
// the vhd can be identified by diskName or diskURI.
func (c *controllerCommon) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if err != nil || !exists {
		// if the host doesn't exist, there is no need to detach
		glog.Warningf("azureDisk - cannot find node %s, skip detaching disk %s", nodeName, diskName)
		return nil
	}

	disks := *vm.StorageProfile.DataDisks
	bFoundDisk := false
	for i, disk := range disks {
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
			(disk.ManagedDisk != nil && diskURI != "" && *disk.ManagedDisk.ID == diskURI) {
			// found the disk
			glog.V(4).Infof("azureDisk - detach disk: name %q uri %q", diskName, diskURI)
			disks = append(disks[:i], disks[i+1:]...)
			bFoundDisk = true
			break
		}
	}

	if !bFoundDisk {
		return fmt.Errorf("detach azure disk failure, disk %s not found, diskURI: %s", diskName, diskURI)
	}

	newVM := compute.VirtualMachine{
		Location: vm.Location,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				DataDisks: &disks,
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk", c.resourceGroup, vmName)
	c.cloud.operationPollRateLimiter.Accept()
	respChan, errChan := c.cloud.VirtualMachinesClient.CreateOrUpdate(c.resourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	if c.cloud.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("azureDisk - update(%s) backing off: vm(%s)", c.resourceGroup, vmName)
		retryErr := c.cloud.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("azureDisk - update(%s) abort backoff: vm(%s)", c.resourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azureDisk - azure disk detach failed, err: %v", err)
	} else {
		glog.V(4).Infof("azureDisk - azure disk detach succeeded")
	}
	return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (c *controllerCommon) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if err != nil {
		return -1, err
	} else if !exists {
		return -1, cloudprovider.InstanceNotFound
	}
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) ||
			(disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) ||
			(disk.ManagedDisk != nil && *disk.ManagedDisk.ID == diskURI) {
			// found the disk
			glog.V(4).Infof("azureDisk - find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
			return *disk.Lun, nil
		}
	}
	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachments on the host and finds an unused lun.
// returns -1 if all luns are used.
func (c *controllerCommon) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if err != nil {
		return -1, err
	} else if !exists {
		return -1, cloudprovider.InstanceNotFound
	}
	used := make([]bool, maxLUN)
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		if disk.Lun != nil {
			used[*disk.Lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("All Luns are used")
}
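
The free-LUN scan above boils down to a boolean occupancy table. A standalone sketch of the same logic, detached from the VM client (names are illustrative):

// nextFreeLUN marks the LUNs that are taken and returns the first
// clear slot, or -1 when the VM has no LUN left.
func nextFreeLUN(taken []int32, maxLUN int) int32 {
	used := make([]bool, maxLUN)
	for _, lun := range taken {
		used[lun] = true
	}
	for k, v := range used {
		if !v {
			return int32(k)
		}
	}
	return -1
}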

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (c *controllerCommon) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
	attached := make(map[string]bool)
	for _, diskName := range diskNames {
		attached[diskName] = false
	}
	vm, exists, err := c.cloud.getVirtualMachine(nodeName)
	if !exists {
		// if host doesn't exist, no need to detach
		glog.Warningf("azureDisk - Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
			nodeName, diskNames)
		return attached, nil
	} else if err != nil {
		return attached, err
	}

	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		for _, diskName := range diskNames {
			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
				attached[diskName] = true
			}
		}
	}

	return attached, nil
}

@@ -23,6 +23,10 @@ import (
	"github.com/golang/glog"
)

const (
	useHTTPS = true
)

// create file share
func (az *Cloud) createFileShare(accountName, accountKey, name string, sizeGB int) error {
	fileClient, err := az.getFileSvcClient(accountName, accountKey)
@@ -55,7 +59,7 @@ func (az *Cloud) deleteFileShare(accountName, accountKey, name string) error {
		share := fileClient.GetShareReference(name)
		return share.Delete(nil)
	}
	return err
	return nil
}

func (az *Cloud) getFileSvcClient(accountName, accountKey string) (*azs.FileServiceClient, error) {

129
pkg/cloudprovider/providers/azure/azure_managedDiskController.go
Normal file
@@ -0,0 +1,129 @@

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure

import (
	"path"
	"strings"

	"github.com/Azure/azure-sdk-for-go/arm/disk"
	storage "github.com/Azure/azure-sdk-for-go/arm/storage"
	"github.com/golang/glog"
	kwait "k8s.io/apimachinery/pkg/util/wait"
)

// ManagedDiskController : managed disk controller struct
type ManagedDiskController struct {
	common *controllerCommon
}

func newManagedDiskController(common *controllerCommon) (*ManagedDiskController, error) {
	return &ManagedDiskController{common: common}, nil
}

// CreateManagedDisk : create managed disk
func (c *ManagedDiskController) CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error) {
	glog.V(4).Infof("azureDisk - creating new managed Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)

	newTags := make(map[string]*string)
	azureDDTag := "kubernetes-azure-dd"
	newTags["created-by"] = &azureDDTag

	// insert original tags to newTags
	if tags != nil {
		for k, v := range tags {
			// Azure won't allow / (forward slash) in tags
			newKey := strings.Replace(k, "/", "-", -1)
			newValue := strings.Replace(v, "/", "-", -1)
			newTags[newKey] = &newValue
		}
	}

	diskSizeGB := int32(sizeGB)
	model := disk.Model{
		Location: &c.common.location,
		Tags:     &newTags,
		Properties: &disk.Properties{
			AccountType:  disk.StorageAccountTypes(storageAccountType),
			DiskSizeGB:   &diskSizeGB,
			CreationData: &disk.CreationData{CreateOption: disk.Empty},
		}}
	cancel := make(chan struct{})
	respChan, errChan := c.common.cloud.DisksClient.CreateOrUpdate(c.common.resourceGroup, diskName, model, cancel)
	<-respChan
	err := <-errChan
	if err != nil {
		return "", err
	}

	diskID := ""

	err = kwait.ExponentialBackoff(defaultBackOff, func() (bool, error) {
		provisionState, id, err := c.getDisk(diskName)
		diskID = id
		// We are waiting for provisioningState==Succeeded.
		// We don't want to hand off managed disks to k8s while they are
		// still being provisioned; this is to avoid some race conditions.
		if err != nil {
			return false, err
		}
		if strings.ToLower(provisionState) == "succeeded" {
			return true, nil
		}
		return false, nil
	})

	if err != nil {
		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v but was unable to confirm provisioningState in poll process", diskName, storageAccountType, sizeGB)
	} else {
		glog.V(2).Infof("azureDisk - created new MD Name:%s StorageAccountType:%s Size:%v", diskName, storageAccountType, sizeGB)
	}

	return diskID, nil
}

// DeleteManagedDisk : delete managed disk
func (c *ManagedDiskController) DeleteManagedDisk(diskURI string) error {
	diskName := path.Base(diskURI)
	cancel := make(chan struct{})
	respChan, errChan := c.common.cloud.DisksClient.Delete(c.common.resourceGroup, diskName, cancel)
	<-respChan
	err := <-errChan
	if err != nil {
		return err
	}
	// We don't need to poll here; k8s will immediately stop referencing the disk
	// and the disk will be eventually deleted - cleanly - by ARM

	glog.V(2).Infof("azureDisk - deleted a managed disk: %s", diskURI)

	return nil
}

// getDisk returns: disk provisionState, diskID, error
func (c *ManagedDiskController) getDisk(diskName string) (string, string, error) {
	result, err := c.common.cloud.DisksClient.Get(c.common.resourceGroup, diskName)
	if err != nil {
		return "", "", err
	}

	if result.Properties != nil && (*result.Properties).ProvisioningState != nil {
		return *(*result.Properties).ProvisioningState, *result.ID, nil
	}

	return "", "", err
}

@@ -18,264 +18,10 @@ package azure

import (
	"fmt"
	"strings"

	"github.com/Azure/azure-sdk-for-go/arm/compute"
	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/volume"
)

const (
	maxLUN               = 64 // max number of LUNs per VM
	errLeaseFailed       = "AcquireDiskLeaseFailed"
	errLeaseIDMissing    = "LeaseIdMissing"
	errContainerNotFound = "ContainerNotFound"
)

// AttachDisk attaches a vhd to vm
// the vhd must exist, can be identified by diskName, diskURI, and lun.
func (az *Cloud) AttachDisk(diskName, diskURI string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error {
	vm, exists, err := az.getVirtualMachine(nodeName)
	if err != nil {
		return err
	} else if !exists {
		return cloudprovider.InstanceNotFound
	}
	disks := *vm.StorageProfile.DataDisks
	disks = append(disks,
		compute.DataDisk{
			Name: &diskName,
			Vhd: &compute.VirtualHardDisk{
				URI: &diskURI,
			},
			Lun:          &lun,
			Caching:      cachingMode,
			CreateOption: "attach",
		})

	newVM := compute.VirtualMachine{
		Location: vm.Location,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				DataDisks: &disks,
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName)
	az.operationPollRateLimiter.Accept()
	respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName)
		retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azure attach failed, err: %v", err)
		detail := err.Error()
		if strings.Contains(detail, errLeaseFailed) {
			// if lease cannot be acquired, immediately detach the disk and return the original error
			glog.Infof("failed to acquire disk lease, try detach")
			az.DetachDiskByName(diskName, diskURI, nodeName)
		}
	} else {
		glog.V(4).Infof("azure attach succeeded")
	}
	return err
}

// DisksAreAttached checks if a list of volumes are attached to the node with the specified NodeName
func (az *Cloud) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
	attached := make(map[string]bool)
	for _, diskName := range diskNames {
		attached[diskName] = false
	}
	vm, exists, err := az.getVirtualMachine(nodeName)
	if !exists {
		// if host doesn't exist, no need to detach
		glog.Warningf("Cannot find node %q, DisksAreAttached will assume disks %v are not attached to it.",
			nodeName, diskNames)
		return attached, nil
	} else if err != nil {
		return attached, err
	}

	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		for _, diskName := range diskNames {
			if disk.Name != nil && diskName != "" && *disk.Name == diskName {
				attached[diskName] = true
			}
		}
	}

	return attached, nil
}

// DetachDiskByName detaches a vhd from host
// the vhd can be identified by diskName or diskURI
func (az *Cloud) DetachDiskByName(diskName, diskURI string, nodeName types.NodeName) error {
	vm, exists, err := az.getVirtualMachine(nodeName)
	if err != nil || !exists {
		// if host doesn't exist, no need to detach
		glog.Warningf("cannot find node %s, skip detaching disk %s", nodeName, diskName)
		return nil
	}

	disks := *vm.StorageProfile.DataDisks
	for i, disk := range disks {
		if (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) {
			// found the disk
			glog.V(4).Infof("detach disk: name %q uri %q", diskName, diskURI)
			disks = append(disks[:i], disks[i+1:]...)
			break
		}
	}
	newVM := compute.VirtualMachine{
		Location: vm.Location,
		VirtualMachineProperties: &compute.VirtualMachineProperties{
			StorageProfile: &compute.StorageProfile{
				DataDisks: &disks,
			},
		},
	}
	vmName := mapNodeNameToVMName(nodeName)
	glog.V(2).Infof("create(%s): vm(%s)", az.ResourceGroup, vmName)
	az.operationPollRateLimiter.Accept()
	respChan, errChan := az.VirtualMachinesClient.CreateOrUpdate(az.ResourceGroup, vmName, newVM, nil)
	resp := <-respChan
	err = <-errChan
	if az.CloudProviderBackoff && shouldRetryAPIRequest(resp.Response, err) {
		glog.V(2).Infof("create(%s) backing off: vm(%s)", az.ResourceGroup, vmName)
		retryErr := az.CreateOrUpdateVMWithRetry(vmName, newVM)
		if retryErr != nil {
			err = retryErr
			glog.V(2).Infof("create(%s) abort backoff: vm(%s)", az.ResourceGroup, vmName)
		}
	}
	if err != nil {
		glog.Errorf("azure disk detach failed, err: %v", err)
	} else {
		glog.V(4).Infof("azure disk detach succeeded")
	}
	return err
}

// GetDiskLun finds the lun on the host that the vhd is attached to, given a vhd's diskName and diskURI
func (az *Cloud) GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error) {
	vm, exists, err := az.getVirtualMachine(nodeName)
	if err != nil {
		return -1, err
	} else if !exists {
		return -1, cloudprovider.InstanceNotFound
	}
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		if disk.Lun != nil && (disk.Name != nil && diskName != "" && *disk.Name == diskName) || (disk.Vhd.URI != nil && diskURI != "" && *disk.Vhd.URI == diskURI) {
			// found the disk
			glog.V(4).Infof("find disk: lun %d name %q uri %q", *disk.Lun, diskName, diskURI)
			return *disk.Lun, nil
		}
	}
	return -1, fmt.Errorf("Cannot find Lun for disk %s", diskName)
}

// GetNextDiskLun searches all vhd attachment on the host and find unused lun
// return -1 if all luns are used
func (az *Cloud) GetNextDiskLun(nodeName types.NodeName) (int32, error) {
	vm, exists, err := az.getVirtualMachine(nodeName)
	if err != nil {
		return -1, err
	} else if !exists {
		return -1, cloudprovider.InstanceNotFound
	}
	used := make([]bool, maxLUN)
	disks := *vm.StorageProfile.DataDisks
	for _, disk := range disks {
		if disk.Lun != nil {
			used[*disk.Lun] = true
		}
	}
	for k, v := range used {
		if !v {
			return int32(k), nil
		}
	}
	return -1, fmt.Errorf("All Luns are used")
}

// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (az *Cloud) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
	var err error
	accounts := []accountWithLocation{}
	if len(storageAccount) > 0 {
		accounts = append(accounts, accountWithLocation{Name: storageAccount})
	} else {
		// find a storage account
		accounts, err = az.getStorageAccounts()
		if err != nil {
			// TODO: create a storage account and container
			return "", "", 0, err
		}
	}
	for _, account := range accounts {
		glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
		if ((storageType == "" || account.StorageType == storageType) && (location == "" || account.Location == location)) || len(storageAccount) > 0 {
			// find the access key with this account
			key, err := az.getStorageAccesskey(account.Name)
			if err != nil {
				glog.V(2).Infof("no key found for storage account %s", account.Name)
				continue
			}

			// create a page blob in this account's vhd container
			name, uri, err := az.createVhdBlob(account.Name, key, name, int64(requestGB), nil)
			if err != nil {
				glog.V(2).Infof("failed to create vhd in account %s: %v", account.Name, err)
				continue
			}
			glog.V(4).Infof("created vhd blob uri: %s", uri)
			return name, uri, requestGB, err
		}
	}
	return "", "", 0, fmt.Errorf("failed to find a matching storage account")
}

// DeleteVolume deletes a VHD blob
func (az *Cloud) DeleteVolume(name, uri string) error {
	accountName, blob, err := az.getBlobNameAndAccountFromURI(uri)
	if err != nil {
		return fmt.Errorf("failed to parse vhd URI %v", err)
	}
	key, err := az.getStorageAccesskey(accountName)
	if err != nil {
		return fmt.Errorf("no key for storage account %s, err %v", accountName, err)
	}
	err = az.deleteVhdBlob(accountName, key, blob)
	if err != nil {
		glog.Warningf("failed to delete blob %s err: %v", uri, err)
		detail := err.Error()
		if strings.Contains(detail, errLeaseIDMissing) {
			// disk is still being used
			// see https://msdn.microsoft.com/en-us/library/microsoft.windowsazure.storage.blob.protocol.bloberrorcodestrings.leaseidmissing.aspx
			return volume.NewDeletedVolumeInUseError(fmt.Sprintf("disk %q is still in use while being deleted", name))
		}
		return fmt.Errorf("failed to delete vhd %v, account %s, blob %s, err: %v", uri, accountName, blob, err)
	}
	glog.V(4).Infof("blob %s deleted", uri)
	return nil
}

// CreateFileShare creates a file share, using a matching storage account
func (az *Cloud) CreateFileShare(name, storageAccount, storageType, location string, requestGB int) (string, string, error) {
	var err error
@@ -19,7 +19,9 @@ package azure

import (
	"errors"
	"fmt"
	"hash/crc32"
	"regexp"
	"strconv"
	"strings"

	"k8s.io/api/core/v1"
@@ -293,3 +295,58 @@ func splitProviderID(providerID string) (types.NodeName, error) {
	}
	return types.NodeName(matches[1]), nil
}

var polyTable = crc32.MakeTable(crc32.Koopman)

// MakeCRC32 : convert string to CRC32 format
func MakeCRC32(str string) string {
	crc := crc32.New(polyTable)
	crc.Write([]byte(str))
	hash := crc.Sum32()
	return strconv.FormatUint(uint64(hash), 10)
}
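
A quick standalone sketch of the same recipe as MakeCRC32 above: a Koopman-polynomial CRC32 printed in decimal (the input string is arbitrary):

package main

import (
	"fmt"
	"hash/crc32"
	"strconv"
)

func main() {
	table := crc32.MakeTable(crc32.Koopman)
	crc := crc32.Checksum([]byte("pvc-disk-name"), table)
	fmt.Println(strconv.FormatUint(uint64(crc), 10))
}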

// ExtractVMData : extract dataDisks, storageProfile from a map struct
func ExtractVMData(vmData map[string]interface{}) (dataDisks []interface{},
	storageProfile map[string]interface{},
	hardwareProfile map[string]interface{}, err error) {
	props, ok := vmData["properties"].(map[string]interface{})
	if !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(properties) to map error")
	}

	storageProfile, ok = props["storageProfile"].(map[string]interface{})
	if !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(storageProfile) to map error")
	}

	hardwareProfile, ok = props["hardwareProfile"].(map[string]interface{})
	if !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(hardwareProfile) to map error")
	}

	dataDisks, ok = storageProfile["dataDisks"].([]interface{})
	if !ok {
		return nil, nil, nil, fmt.Errorf("convert vmData(dataDisks) to map error")
	}
	return dataDisks, storageProfile, hardwareProfile, nil
}

// ExtractDiskData : extract provisioningState, diskState from a map struct
func ExtractDiskData(diskData interface{}) (provisioningState string, diskState string, err error) {
	fragment, ok := diskData.(map[string]interface{})
	if !ok {
		return "", "", fmt.Errorf("convert diskData to map error")
	}

	properties, ok := fragment["properties"].(map[string]interface{})
	if !ok {
		return "", "", fmt.Errorf("convert diskData(properties) to map error")
	}

	provisioningState, ok = properties["provisioningState"].(string) // if there is a disk, the provisioningState property will be there
	if ref, ok := properties["diskState"]; ok {
		diskState = ref.(string)
	}
	return provisioningState, diskState, nil
}
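
The extractors above walk a JSON-decoded ARM response with type assertions. A minimal self-contained sketch of the same map walk, over a made-up fragment whose shape is assumed from the extractors (not a real ARM payload):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	raw := `{"properties":{"storageProfile":{"dataDisks":[{"properties":{"provisioningState":"Succeeded","diskState":"Attached"}}]},"hardwareProfile":{"vmSize":"Standard_DS2"}}}`

	var vmData map[string]interface{}
	if err := json.Unmarshal([]byte(raw), &vmData); err != nil {
		panic(err)
	}

	// same assertion chain as ExtractVMData / ExtractDiskData
	props := vmData["properties"].(map[string]interface{})
	storageProfile := props["storageProfile"].(map[string]interface{})
	dataDisks := storageProfile["dataDisks"].([]interface{})

	diskProps := dataDisks[0].(map[string]interface{})["properties"].(map[string]interface{})
	fmt.Println(diskProps["provisioningState"], diskProps["diskState"])
}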

@@ -1,5 +1,4 @@
approvers:
- bprashanth
- deads2k
- derekwaynecarr
- mikedanese

@@ -1,5 +1,4 @@
reviewers:
- bprashanth
- bowei
- MrHohn
- thockin

@@ -1,5 +1,4 @@
approvers:
- bprashanth
- enisoc
- foxish
- janetkuo
@@ -7,7 +6,6 @@ approvers:
- kow3ns
- smarterclayton
reviewers:
- bprashanth
- enisoc
- foxish
- janetkuo

@@ -2,9 +2,7 @@ approvers:
- caesarxuchao
- kargakis
- lavalamp
- bprashanth
reviewers:
- caesarxuchao
- kargakis
- lavalamp
- bprashanth

@@ -2,9 +2,7 @@ approvers:
- caesarxuchao
- kargakis
- lavalamp
- bprashanth
reviewers:
- caesarxuchao
- kargakis
- lavalamp
- bprashanth

@@ -1,5 +1,4 @@
reviewers:
- bprashanth
- bowei
- MrHohn
- thockin

@@ -1,5 +1,4 @@
approvers:
- bprashanth
- enisoc
- foxish
- janetkuo
@@ -7,7 +6,6 @@ approvers:
- kow3ns
- smarterclayton
reviewers:
- bprashanth
- enisoc
- foxish
- janetkuo

@@ -17,11 +17,9 @@ go_library(
	"//pkg/credentialprovider:go_default_library",
	"//vendor/github.com/Azure/azure-sdk-for-go/arm/containerregistry:go_default_library",
	"//vendor/github.com/Azure/go-autorest/autorest:go_default_library",
	"//vendor/github.com/Azure/go-autorest/autorest/adal:go_default_library",
	"//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library",
	"//vendor/github.com/golang/glog:go_default_library",
	"//vendor/github.com/spf13/pflag:go_default_library",
	"//vendor/gopkg.in/yaml.v2:go_default_library",
],
)

@@ -17,14 +17,12 @@ limitations under the License.
package azure

import (
	"io/ioutil"
	"io"
	"os"
	"time"

	yaml "gopkg.in/yaml.v2"

	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
	azureapi "github.com/Azure/go-autorest/autorest/azure"
	"github.com/golang/glog"
	"github.com/spf13/pflag"
@@ -47,10 +45,12 @@ func init() {
	})
}

// RegistriesClient is a testable interface for the ACR client List operation.
type RegistriesClient interface {
	List() (containerregistry.RegistryListResult, error)
}

// NewACRProvider parses the specified configFile and returns a DockerConfigProvider
func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider {
	return &acrProvider{
		file: configFile,
@@ -59,24 +59,16 @@ func NewACRProvider(configFile *string) credentialprovider.DockerConfigProvider

type acrProvider struct {
	file           *string
	config         azure.Config
	environment    azureapi.Environment
	config         *azure.Config
	environment    *azureapi.Environment
	registryClient RegistriesClient
}

func (a *acrProvider) loadConfig(contents []byte) error {
	err := yaml.Unmarshal(contents, &a.config)
func (a *acrProvider) loadConfig(rdr io.Reader) error {
	var err error
	a.config, a.environment, err = azure.ParseConfig(rdr)
	if err != nil {
		return err
	}

	if a.config.Cloud == "" {
		a.environment = azureapi.PublicCloud
	} else {
		a.environment, err = azureapi.EnvironmentFromName(a.config.Cloud)
		if err != nil {
			return err
		}
		glog.Errorf("Failed to load azure credential file: %v", err)
	}
	return nil
}
@@ -86,27 +78,21 @@ func (a *acrProvider) Enabled() bool {
		glog.V(5).Infof("Azure config unspecified, disabling")
		return false
	}
	contents, err := ioutil.ReadFile(*a.file)

	f, err := os.Open(*a.file)
	if err != nil {
		glog.Errorf("Failed to load azure credential file: %v", err)
		glog.Errorf("Failed to load config from file: %s", *a.file)
		return false
	}
	if err := a.loadConfig(contents); err != nil {
		glog.Errorf("Failed to parse azure credential file: %v", err)
	defer f.Close()

	err = a.loadConfig(f)
	if err != nil {
		glog.Errorf("Failed to load config from file: %s", *a.file)
		return false
	}

	oauthConfig, err := adal.NewOAuthConfig(a.environment.ActiveDirectoryEndpoint, a.config.TenantID)
	if err != nil {
		glog.Errorf("Failed to get oauth config: %v", err)
		return false
	}

	servicePrincipalToken, err := adal.NewServicePrincipalToken(
		*oauthConfig,
		a.config.AADClientID,
		a.config.AADClientSecret,
		a.environment.ServiceManagementEndpoint)
	servicePrincipalToken, err := azure.GetServicePrincipalToken(a.config, a.environment)
	if err != nil {
		glog.Errorf("Failed to create service principal token: %v", err)
		return false

@@ -17,6 +17,7 @@ limitations under the License.
package azure

import (
	"bytes"
	"testing"

	"github.com/Azure/azure-sdk-for-go/arm/containerregistry"
@@ -66,7 +67,7 @@ func Test(t *testing.T) {
	provider := &acrProvider{
		registryClient: fakeClient,
	}
	provider.loadConfig([]byte(configStr))
	provider.loadConfig(bytes.NewBufferString(configStr))

	creds := provider.Provide()

@@ -56,6 +56,7 @@ type ApplyOptions struct {
	GracePeriod    int
	PruneResources []pruneResource
	Timeout        time.Duration
	cmdBaseName    string
}

const (
@@ -65,8 +66,6 @@ const (
	backOffPeriod = 1 * time.Second
	// how many times we can retry before back off
	triesBeforeBackOff = 1

	warningNoLastAppliedConfigAnnotation = "Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply\n"
)

var (
@@ -92,11 +91,17 @@ var (

	# Apply the configuration in manifest.yaml and delete all the other configmaps that are not in the file.
	kubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/ConfigMap`))

	warningNoLastAppliedConfigAnnotation = "Warning: %[1]s apply should be used on resource created by either %[1]s create --save-config or %[1]s apply\n"
)

func NewCmdApply(f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
func NewCmdApply(baseName string, f cmdutil.Factory, out, errOut io.Writer) *cobra.Command {
	var options ApplyOptions

	// Store baseName for use in printing warnings / messages involving the base command name.
	// This is useful for downstream commands that wrap this one.
	options.cmdBaseName = baseName

	cmd := &cobra.Command{
		Use:   "apply -f FILENAME",
		Short: i18n.T("Apply a configuration to a resource by filename or stdin"),
@@ -299,7 +304,7 @@ func RunApply(f cmdutil.Factory, cmd *cobra.Command, out, errOut io.Writer, opti
		return err
	}
	if _, ok := annotationMap[api.LastAppliedConfigAnnotation]; !ok {
		fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation)
		fmt.Fprintf(errOut, warningNoLastAppliedConfigAnnotation, options.cmdBaseName)
	}
	overwrite := cmdutil.GetFlagBool(cmd, "overwrite")
	helper := resource.NewHelper(info.Client, info.Mapping)

@@ -47,7 +47,7 @@ func TestApplyExtraArgsFail(t *testing.T) {
	errBuf := bytes.NewBuffer([]byte{})

	f, _, _, _ := cmdtesting.NewAPIFactory()
	c := NewCmdApply(f, buf, errBuf)
	c := NewCmdApply("kubectl", f, buf, errBuf)
	if validateApplyArgs(c, []string{"rc"}) == nil {
		t.Fatalf("unexpected non-error")
	}
@@ -377,14 +377,14 @@ func TestApplyObjectWithoutAnnotation(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})

	// uses the name from the file, not the response
	expectRC := "replicationcontroller/" + nameRC + "\n"
	expectWarning := warningNoLastAppliedConfigAnnotation
	expectWarning := fmt.Sprintf(warningNoLastAppliedConfigAnnotation, "kubectl")
	if errBuf.String() != expectWarning {
		t.Fatalf("unexpected non-warning: %s\nexpected: %s", errBuf.String(), expectWarning)
	}
@@ -422,7 +422,7 @@ func TestApplyObject(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})
@@ -479,7 +479,7 @@ func TestApplyObjectOutput(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "yaml")
	cmd.Run(cmd, []string{})
@@ -533,7 +533,7 @@ func TestApplyRetry(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})
@@ -578,7 +578,7 @@ func TestApplyNonExistObject(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameRC)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})
@@ -636,7 +636,7 @@ func testApplyMultipleObjects(t *testing.T, asList bool) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	if asList {
		cmd.Flags().Set("filename", filenameRCSVC)
	} else {
@@ -729,7 +729,7 @@ func TestApplyNULLPreservation(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameDeployObjClientside)
	cmd.Flags().Set("output", "name")

@@ -789,7 +789,7 @@ func TestUnstructuredApply(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameWidgetClientside)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})
@@ -876,7 +876,7 @@ func TestUnstructuredIdempotentApply(t *testing.T) {
	buf := bytes.NewBuffer([]byte{})
	errBuf := bytes.NewBuffer([]byte{})

	cmd := NewCmdApply(f, buf, errBuf)
	cmd := NewCmdApply("kubectl", f, buf, errBuf)
	cmd.Flags().Set("filename", filenameWidgetClientside)
	cmd.Flags().Set("output", "name")
	cmd.Run(cmd, []string{})

@@ -337,7 +337,7 @@ func NewKubectlCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob
	{
		Message: "Advanced Commands:",
		Commands: []*cobra.Command{
			NewCmdApply(f, out, err),
			NewCmdApply("kubectl", f, out, err),
			NewCmdPatch(f, out),
			NewCmdReplace(f, out),
			deprecatedAlias("update", NewCmdReplace(f, out)),

@@ -37,6 +37,14 @@ type Version struct {
	ServerVersion *apimachineryversion.Info `json:"serverVersion,omitempty" yaml:"serverVersion,omitempty"`
}

// VersionOptions describes the options available to users of the "kubectl
// version" command.
type VersionOptions struct {
	clientOnly bool
	short      bool
	output     string
}

var (
	versionExample = templates.Examples(i18n.T(`
	# Print the client and server versions for the current context
@@ -50,66 +58,19 @@ func NewCmdVersion(f cmdutil.Factory, out io.Writer) *cobra.Command {
		Long:    "Print the client and server version information for the current context",
		Example: versionExample,
		Run: func(cmd *cobra.Command, args []string) {
			err := RunVersion(f, out, cmd)
			cmdutil.CheckErr(err)
			options := new(VersionOptions)
			cmdutil.CheckErr(options.Complete(cmd))
			cmdutil.CheckErr(options.Validate())
			cmdutil.CheckErr(options.Run(f, out))
		},
	}
	cmd.Flags().BoolP("client", "c", false, "Client version only (no server required).")
	cmd.Flags().BoolP("short", "", false, "Print just the version number.")
	cmd.Flags().String("output", "", "output format, options available are yaml and json")
	cmd.Flags().String("output", "", "one of 'yaml' or 'json'")
	cmd.Flags().MarkShorthandDeprecated("client", "please use --client instead.")
	return cmd
}

func RunVersion(f cmdutil.Factory, out io.Writer, cmd *cobra.Command) error {
	var serverVersion *apimachineryversion.Info = nil
	var serverErr error = nil
	vo := Version{nil, nil}

	clientVersion := version.Get()
	vo.ClientVersion = &clientVersion

	if !cmdutil.GetFlagBool(cmd, "client") {
		serverVersion, serverErr = retrieveServerVersion(f)
		vo.ServerVersion = serverVersion
	}

	switch of := cmdutil.GetFlagString(cmd, "output"); of {
	case "":
		if cmdutil.GetFlagBool(cmd, "short") {
			fmt.Fprintf(out, "Client Version: %s\n", clientVersion.GitVersion)

			if serverVersion != nil {
				fmt.Fprintf(out, "Server Version: %s\n", serverVersion.GitVersion)
			}
		} else {
			fmt.Fprintf(out, "Client Version: %s\n", fmt.Sprintf("%#v", clientVersion))

			if serverVersion != nil {
				fmt.Fprintf(out, "Server Version: %s\n", fmt.Sprintf("%#v", *serverVersion))
			}
		}
	case "yaml":
		y, err := yaml.Marshal(&vo)
		if err != nil {
			return err
		}

		fmt.Fprintln(out, string(y))
	case "json":
		y, err := json.Marshal(&vo)
		if err != nil {
			return err
		}
		fmt.Fprintln(out, string(y))
	default:
		return errors.New("invalid output format: " + of)

	}

	return serverErr
}

func retrieveServerVersion(f cmdutil.Factory) (*apimachineryversion.Info, error) {
	discoveryClient, err := f.DiscoveryClient()
	if err != nil {
@@ -120,3 +81,67 @@ func retrieveServerVersion(f cmdutil.Factory) (*apimachineryversion.Info, error)
	discoveryClient.Invalidate()
	return discoveryClient.ServerVersion()
}

func (o *VersionOptions) Run(f cmdutil.Factory, out io.Writer) error {
	var (
		serverVersion *apimachineryversion.Info
		serverErr     error
		versionInfo   Version
	)

	clientVersion := version.Get()
	versionInfo.ClientVersion = &clientVersion

	if !o.clientOnly {
		serverVersion, serverErr = retrieveServerVersion(f)
		versionInfo.ServerVersion = serverVersion
	}

	switch o.output {
	case "":
		if o.short {
			fmt.Fprintf(out, "Client Version: %s\n", clientVersion.GitVersion)
			if serverVersion != nil {
				fmt.Fprintf(out, "Server Version: %s\n", serverVersion.GitVersion)
			}
		} else {
			fmt.Fprintf(out, "Client Version: %s\n", fmt.Sprintf("%#v", clientVersion))
			if serverVersion != nil {
				fmt.Fprintf(out, "Server Version: %s\n", fmt.Sprintf("%#v", *serverVersion))
			}
		}
	case "yaml":
		marshalled, err := yaml.Marshal(&versionInfo)
		if err != nil {
			return err
		}
		fmt.Fprintln(out, string(marshalled))
	case "json":
		marshalled, err := json.Marshal(&versionInfo)
		if err != nil {
			return err
		}
		fmt.Fprintln(out, string(marshalled))
	default:
		// There is a bug in the program if we hit this case.
		// However, we follow a policy of never panicking.
		return fmt.Errorf("VersionOptions were not validated: --output=%q should have been rejected", o.output)
	}

	return serverErr
}

func (o *VersionOptions) Complete(cmd *cobra.Command) error {
	o.clientOnly = cmdutil.GetFlagBool(cmd, "client")
	o.short = cmdutil.GetFlagBool(cmd, "short")
	o.output = cmdutil.GetFlagString(cmd, "output")
	return nil
}

func (o *VersionOptions) Validate() error {
	if o.output != "" && o.output != "yaml" && o.output != "json" {
		return errors.New(`--output must be 'yaml' or 'json'`)
	}

	return nil
}
|
||||
|
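Editor's note: the refactor above moves the version command onto a Complete/Validate/Run options struct. Below is a minimal, self-contained sketch (not part of this commit; all names are illustrative) of that pattern, showing why an invalid --output is rejected in Validate before Run does any work:

package main

import (
	"errors"
	"fmt"
)

type options struct{ output string }

// Complete copies raw flag values into the struct; no validation yet.
func (o *options) Complete(raw string) error {
	o.output = raw
	return nil
}

// Validate rejects bad inputs before any side effects happen.
func (o *options) Validate() error {
	if o.output != "" && o.output != "yaml" && o.output != "json" {
		return errors.New("--output must be 'yaml' or 'json'")
	}
	return nil
}

// Run performs the actual work; it can assume validated inputs.
func (o *options) Run() error {
	fmt.Println("would print version in format:", o.output)
	return nil
}

func main() {
	o := new(options)
	for _, step := range []func() error{
		func() error { return o.Complete("json") },
		o.Validate,
		o.Run,
	} {
		if err := step(); err != nil {
			fmt.Println("error:", err)
			return
		}
	}
}
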
@@ -18,7 +18,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- erictune
- davidopp

@@ -91,7 +91,7 @@ func GetStandardPrinter(outputOpts *OutputOptions, noHeaders bool, mapper meta.R

case "jsonpath-file":
if len(formatArgument) == 0 {
return nil, fmt.Errorf("jsonpath file format specified but no template file file given")
return nil, fmt.Errorf("jsonpath file format specified but no template file given")
}
data, err := ioutil.ReadFile(formatArgument)
if err != nil {

@@ -1,6 +1,5 @@
approvers:
- thockin
- bprashanth
- matchstick
reviewers:
- thockin
@@ -8,7 +7,6 @@ reviewers:
- smarterclayton
- brendandburns
- vishh
- bprashanth
- justinsb
- freehan
- dcbw

@@ -3,5 +3,4 @@ reviewers:
- lavalamp
- smarterclayton
- brendandburns
- bprashanth
- freehan

@@ -1,7 +1,6 @@
reviewers:
- thockin
- smarterclayton
- bprashanth
- justinsb
- freehan
- dcbw

@@ -3,4 +3,3 @@ reviewers:
- lavalamp
- smarterclayton
- freehan
- bprashanth

@@ -15,7 +15,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- erictune
- davidopp

@@ -1,6 +1,5 @@
reviewers:
- wojtek-t
- bprashanth
- gmarek
- soltysh
- madhusudancs

@@ -10,7 +10,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- erictune
- pmorie

@@ -12,7 +12,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -12,12 +12,14 @@ go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"azure_common.go",
"azure_dd.go",
"azure_mounter.go",
"azure_provision.go",
"vhd_util.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/azure:go_default_library",
"//pkg/util/exec:go_default_library",
@@ -26,37 +28,18 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/storage:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = [
"azure_dd_test.go",
"vhd_util_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/util/exec:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/Azure/azure-sdk-for-go/arm/compute:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
@@ -69,3 +52,20 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_test(
name = "go_default_test",
srcs = [
"azure_common_test.go",
"azure_dd_test.go",
],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/util/exec:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

@@ -10,7 +10,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -26,54 +26,43 @@ import (

"github.com/Azure/azure-sdk-for-go/arm/compute"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskDetacher struct {
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}

type azureDiskAttacher struct {
host volume.VolumeHost
azureProvider azureCloudProvider
plugin *azureDataDiskPlugin
cloud *azure.Cloud
}

var _ volume.Attacher = &azureDiskAttacher{}

var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}

const (
checkSleepDuration = time.Second
)
var _ volume.Detacher = &azureDiskDetacher{}

// acquire lock to get a LUN number
var getLunMutex = keymutex.NewKeyMutex()

// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
glog.V(4).Infof("failed to get azure provider")
return nil, err
}

return &azureDiskAttacher{
host: plugin.host,
azureProvider: azure,
}, nil
}

// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Warningf("failed to get azure disk spec")
return "", err
}
instanceid, err := attacher.azureProvider.InstanceID(nodeName)

instanceid, err := a.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("failed to get azure instance id")
return "", fmt.Errorf("failed to get azure instance id for node %q", nodeName)
@@ -82,7 +71,12 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node
instanceid = instanceid[(ind + 1):]
}

lun, err := attacher.azureProvider.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
diskController, err := getDiskController(a.plugin.host)
if err != nil {
return "", err
}

lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
if err == cloudprovider.InstanceNotFound {
// Log error and continue with attach
glog.Warningf(
@@ -98,13 +92,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node
getLunMutex.LockKey(instanceid)
defer getLunMutex.UnlockKey(instanceid)

lun, err = attacher.azureProvider.GetNextDiskLun(nodeName)
lun, err = diskController.GetNextDiskLun(nodeName)
if err != nil {
glog.Warningf("no LUN available for instance %q", nodeName)
return "", fmt.Errorf("all LUNs are used, cannot attach volume %q to instance %q", volumeSource.DiskName, instanceid)
}
glog.V(4).Infof("Trying to attach volume %q lun %d to node %q.", volumeSource.DataDiskURI, lun, nodeName)
err = attacher.azureProvider.AttachDisk(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, lun, compute.CachingTypes(*volumeSource.CachingMode))
if err == nil {
glog.V(4).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
} else {
@@ -116,14 +111,14 @@ func (attacher *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.Node
return strconv.Itoa(int(lun)), err
}

func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumeSpecMap := make(map[string]*volume.Spec)
volumeIDList := []string{}
for _, spec := range specs {
volumeSource, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
glog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
continue
}

@@ -131,11 +126,16 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node
volumesAttachedCheck[spec] = true
volumeSpecMap[volumeSource.DiskName] = spec
}
attachedResult, err := attacher.azureProvider.DisksAreAttached(volumeIDList, nodeName)

diskController, err := getDiskController(a.plugin.host)
if err != nil {
return nil, err
}
attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"Error checking if volumes (%v) are attached to current node (%q). err=%v",
"azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
volumeIDList, nodeName, err)
return volumesAttachedCheck, err
}
@@ -144,71 +144,84 @@ func (attacher *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, node
if !attached {
spec := volumeSpecMap[volumeID]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
glog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
}
}
return volumesAttachedCheck, nil
}

// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
var err error
lun, err := strconv.Atoi(devicePath)
if err != nil {
return "", fmt.Errorf("azureDisk - Wait for attach expect device path as a lun number, instead got: %s", devicePath)
}

volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}

if len(lunStr) == 0 {
return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName)
}
io := &osIOHandler{}
scsiHostRescan(io)

lun, err := strconv.Atoi(lunStr)
if err != nil {
return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
}
scsiHostRescan(&osIOHandler{})
diskName := volumeSource.DiskName
nodeName := a.plugin.host.GetHostName()
newDevicePath := ""

err = wait.Poll(1*time.Second, timeout, func() (bool, error) {
exe := exec.New()
devicePath := ""

err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr)
if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
if len(devicePath) == 0 {
glog.Warningf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr)
return false, fmt.Errorf("cannot find attached Azure disk %q(lun %s) locally.", volumeSource.DiskName, lunStr)
if newDevicePath, err = findDiskByLun(lun, io, exe); err != nil {
return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
}
glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)

// did we find it?
if newDevicePath != "" {
// the current sequence k8s uses for an unformatted disk (check-disk, mount, fail, mkfs.extX) hangs on
// Azure Managed disk scsi interface. this is a hack and will be replaced once we identify and solve
// the root cause on Azure.
formatIfNotFormatted(newDevicePath, *volumeSource.FSType)
return true, nil
} else {
//Log error, if any, and continue checking periodically
glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err)
return false, nil
}

return false, fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun)
})
return devicePath, err

return newDevicePath, err
}

// GetDeviceMountPath finds the volume's mount path on the node
func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
// to avoid name conflicts (similar *.vhd name)
// we use hash diskUri and we use it as device mount target.
// this is generalized for both managed and blob disks
// we also prefix the hash with m/b based on disk kind
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return "", err
}

return makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil
if volumeSource.Kind == nil { // this spec was constructed from info on the node
pdPath := path.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volumeSource.DataDiskURI)
return pdPath, nil
}

isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
}

// MountDevice runs mount command on the node to mount the volume
func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter()
mounter := attacher.plugin.host.GetMounter()
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)

if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err)
}
notMnt = true
} else {
return err
return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err)
}
}

@@ -218,47 +231,27 @@ func (attacher *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath str
}

options := []string{}
if spec.ReadOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Runner: exec.New()}
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr)
}
return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err)
}
}
return nil
}

type azureDiskDetacher struct {
mounter mount.Interface
azureProvider azureCloudProvider
}

var _ volume.Detacher = &azureDiskDetacher{}

// NewDetacher initializes a volume Detacher
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}

return &azureDiskDetacher{
mounter: plugin.host.GetMounter(),
azureProvider: azure,
}, nil
}

// Detach detaches disk from Azure VM.
func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeName) error {
if diskName == "" {
return fmt.Errorf("invalid disk to detach: %q", diskName)
func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error {
if diskURI == "" {
return fmt.Errorf("invalid disk to detach: %q", diskURI)
}
instanceid, err := detacher.azureProvider.InstanceID(nodeName)

instanceid, err := d.cloud.InstanceID(nodeName)
if err != nil {
glog.Warningf("no instance id for node %q, skip detaching", nodeName)
return nil
@@ -267,22 +260,28 @@ func (detacher *azureDiskDetacher) Detach(diskName string, nodeName types.NodeNa
instanceid = instanceid[(ind + 1):]
}

glog.V(4).Infof("detach %v from node %q", diskName, nodeName)
err = detacher.azureProvider.DetachDiskByName(diskName, "" /* diskURI */, nodeName)
glog.V(4).Infof("detach %v from node %q", diskURI, nodeName)

diskController, err := getDiskController(d.plugin.host)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskName, err)
return err
}
err = diskController.DetachDiskByName("", diskURI, nodeName)
if err != nil {
glog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
}

glog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
return err
}

// UnmountDevice unmounts the volume on the node
func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
volume := path.Base(deviceMountPath)
if err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil {
glog.Errorf("Error unmounting %q: %v", volume, err)
return err
err := volumeutil.UnmountPath(deviceMountPath, detacher.plugin.host.GetMounter())
if err == nil {
glog.V(4).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
} else {
return nil
glog.Infof("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
}
return err
}

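Editor's note: a minimal, self-contained sketch (not part of this commit) of the polling shape the new WaitForAttach relies on. wait.Poll from k8s.io/apimachinery retries a condition at a fixed interval until timeout: returning (true, nil) stops with success, (false, nil) retries, and a non-nil error aborts immediately. findDevice here is an illustrative stand-in for findDiskByLun:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForDevice polls findDevice once per second until it reports a
// non-empty device path, the probe errors out, or the timeout expires.
func waitForDevice(timeout time.Duration, findDevice func() (string, error)) (string, error) {
	devicePath := ""
	err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
		p, err := findDevice()
		if err != nil {
			return false, err // abort: the probe itself failed
		}
		if p != "" {
			devicePath = p
			return true, nil // found the attached device
		}
		return false, nil // not visible yet; keep polling
	})
	return devicePath, err
}

func main() {
	p, err := waitForDevice(3*time.Second, func() (string, error) { return "/dev/sdc", nil })
	fmt.Println(p, err)
}
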
342 pkg/volume/azure_dd/azure_common.go Normal file
@@ -0,0 +1,342 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strconv"
libstrings "strings"

storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
)

const (
defaultFSType = "ext4"
defaultStorageAccountType = storage.StandardLRS
)

type dataDisk struct {
volume.MetricsProvider
volumeName string
diskName string
podUID types.UID
}

var (
supportedCachingModes = sets.NewString(
string(api.AzureDataDiskCachingNone),
string(api.AzureDataDiskCachingReadOnly),
string(api.AzureDataDiskCachingReadWrite))

supportedDiskKinds = sets.NewString(
string(api.AzureSharedBlobDisk),
string(api.AzureDedicatedBlobDisk),
string(api.AzureManagedDisk))

supportedStorageAccountTypes = sets.NewString("Premium_LRS", "Standard_LRS")
)

func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, strings.EscapeQualifiedNameForDisk(azureDataDiskPluginName), volName)
}

// creates a unique path for disks (even if they share the same *.vhd name)
func makeGlobalPDPath(host volume.VolumeHost, diskUri string, isManaged bool) (string, error) {
diskUri = libstrings.ToLower(diskUri) // always lower uri because users may enter it in caps.
uniqueDiskNameTemplate := "%s%s"
hashedDiskUri := azure.MakeCRC32(diskUri)
prefix := "b"
if isManaged {
prefix = "m"
}
// "{m for managed b for blob}{hashed diskUri or DiskId depending on disk kind }"
diskName := fmt.Sprintf(uniqueDiskNameTemplate, prefix, hashedDiskUri)
pdPath := path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, diskName)

return pdPath, nil
}
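Editor's note: a small self-contained sketch (not part of this commit) of the naming idea in makeGlobalPDPath above. The real code calls azure.MakeCRC32 from the Azure cloud provider; hash/crc32 here is an assumption, so the exact checksum may differ:

package main

import (
	"fmt"
	"hash/crc32"
	"strings"
)

// uniqueDiskName derives a short, stable directory name from a disk URI,
// prefixed "m" for managed disks and "b" for blob disks, so two *.vhd
// files with the same base name never collide under the mounts dir.
func uniqueDiskName(diskURI string, isManaged bool) string {
	prefix := "b"
	if isManaged {
		prefix = "m"
	}
	sum := crc32.ChecksumIEEE([]byte(strings.ToLower(diskURI)))
	return fmt.Sprintf("%s%d", prefix, sum)
}

func main() {
	fmt.Println(uniqueDiskName("https://account.blob.core.windows.net/vhds/disk1.vhd", false))
}
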

func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost) *dataDisk {
var metricProvider volume.MetricsProvider
if podUID != "" {
metricProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))
}

return &dataDisk{
MetricsProvider: metricProvider,
volumeName: volumeName,
diskName: diskName,
podUID: podUID,
}
}

func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}

if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}

return nil, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}

func normalizeFsType(fsType string) string {
if fsType == "" {
return defaultFSType
}

return fsType
}

func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
if kind == "" {
return v1.AzureDedicatedBlobDisk, nil
}

if !supportedDiskKinds.Has(kind) {
return "", fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List())
}

return v1.AzureDataDiskKind(kind), nil
}

func normalizeStorageAccountType(storageAccountType string) (storage.SkuName, error) {
if storageAccountType == "" {
return defaultStorageAccountType, nil
}

if !supportedStorageAccountTypes.Has(storageAccountType) {
return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedStorageAccountTypes.List())
}

return storage.SkuName(storageAccountType), nil
}

func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
if cachingMode == "" {
return v1.AzureDataDiskCachingReadWrite, nil
}

if !supportedCachingModes.Has(string(cachingMode)) {
return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List())
}

return cachingMode, nil
}

type ioHandler interface {
ReadDir(dirname string) ([]os.FileInfo, error)
WriteFile(filename string, data []byte, perm os.FileMode) error
Readlink(name string) (string, error)
}

//TODO: check if priming the iscsi interface is actually needed

type osIOHandler struct{}

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
return ioutil.ReadDir(dirname)
}

func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
return ioutil.WriteFile(filename, data, perm)
}

func (handler *osIOHandler) Readlink(name string) (string, error) {
return os.Readlink(name)
}

// exclude those used by azure as resource and OS root in /dev/disk/azure
func listAzureDiskPath(io ioHandler) []string {
azureDiskPath := "/dev/disk/azure/"
var azureDiskList []string
if dirs, err := io.ReadDir(azureDiskPath); err == nil {
for _, f := range dirs {
name := f.Name()
diskPath := azureDiskPath + name
if link, linkErr := io.Readlink(diskPath); linkErr == nil {
sd := link[(libstrings.LastIndex(link, "/") + 1):]
azureDiskList = append(azureDiskList, sd)
}
}
}
glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
return azureDiskList
}

func scsiHostRescan(io ioHandler) {
scsi_path := "/sys/class/scsi_host/"
if dirs, err := io.ReadDir(scsi_path); err == nil {
for _, f := range dirs {
name := scsi_path + f.Name() + "/scan"
data := []byte("- - -")
if err = io.WriteFile(name, data, 0666); err != nil {
glog.Warningf("failed to rescan scsi host %s", name)
}
}
} else {
glog.Warningf("failed to read %s, err %v", scsi_path, err)
}
}

func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) {
azureDisks := listAzureDiskPath(io)
return findDiskByLunWithConstraint(lun, io, exe, azureDisks)
}

// finds a device mounted to "current" node
func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) {
var err error
sys_path := "/sys/bus/scsi/devices"
if dirs, err := io.ReadDir(sys_path); err == nil {
for _, f := range dirs {
name := f.Name()
// look for path like /sys/bus/scsi/devices/3:0:0:1
arr := libstrings.Split(name, ":")
if len(arr) < 4 {
continue
}
// extract LUN from the path.
// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
l, err := strconv.Atoi(arr[3])
if err != nil {
// unknown path format, continue to read the next one
glog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
continue
}
if lun == l {
// find the matching LUN
// read vendor and model to ensure it is a VHD disk
vendor := path.Join(sys_path, name, "vendor")
model := path.Join(sys_path, name, "model")
out, err := exe.Command("cat", vendor, model).CombinedOutput()
if err != nil {
glog.V(4).Infof("azure disk - failed to cat device vendor and model, err: %v", err)
continue
}
matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", libstrings.ToUpper(string(out)))
if err != nil || !matched {
glog.V(4).Infof("azure disk - doesn't match VHD, output %v, error %v", string(out), err)
continue
}
// find a disk, validate name
dir := path.Join(sys_path, name, "block")
if dev, err := io.ReadDir(dir); err == nil {
found := false
for _, diskName := range azureDisks {
glog.V(12).Infof("azure disk - validating disk %q with sys disk %q", dev[0].Name(), diskName)
if string(dev[0].Name()) == diskName {
found = true
break
}
}
if !found {
return "/dev/" + dev[0].Name(), nil
}
}
}
}
}
return "", err
}

func formatIfNotFormatted(disk string, fstype string) {
notFormatted, err := diskLooksUnformatted(disk)
if err == nil && notFormatted {
args := []string{disk}
// Disk is unformatted so format it.
// Use 'ext4' as the default
if len(fstype) == 0 {
fstype = "ext4"
}
if fstype == "ext4" || fstype == "ext3" {
args = []string{"-E", "lazy_itable_init=0,lazy_journal_init=0", "-F", disk}
}
glog.Infof("azureDisk - Disk %q appears to be unformatted, attempting to format as type: %q with options: %v", disk, fstype, args)
runner := exec.New()
cmd := runner.Command("mkfs."+fstype, args...)
_, err := cmd.CombinedOutput()
if err == nil {
// the disk has been formatted successfully try to mount it again.
glog.Infof("azureDisk - Disk successfully formatted (mkfs): %s - %s %s", fstype, disk, "tt")
}
glog.Warningf("azureDisk - format of disk %q failed: type:(%q) target:(%q) options:(%q)error:(%v)", disk, fstype, "tt", "o", err)
} else {
if err != nil {
glog.Warningf("azureDisk - Failed to check if the disk %s formatted with error %s, will attach anyway", disk, err)
} else {
glog.Infof("azureDisk - Disk %s already formatted, will not format", disk)
}
}
}

func diskLooksUnformatted(disk string) (bool, error) {
args := []string{"-nd", "-o", "FSTYPE", disk}
runner := exec.New()
cmd := runner.Command("lsblk", args...)
glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args)
dataOut, err := cmd.CombinedOutput()
if err != nil {
glog.Errorf("Could not determine if disk %q is formatted (%v)", disk, err)
return false, err
}
output := libstrings.TrimSpace(string(dataOut))
return output == "", nil
}

func getDiskController(host volume.VolumeHost) (DiskController, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)

if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}

func getCloud(host volume.VolumeHost) (*azure.Cloud, error) {
cloudProvider := host.GetCloudProvider()
az, ok := cloudProvider.(*azure.Cloud)

if !ok || az == nil {
return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}
return az, nil
}

func strFirstLetterToUpper(str string) string {
if len(str) < 2 {
return str
}
return libstrings.ToUpper(string(str[0])) + str[1:]
}

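Editor's note: a small self-contained sketch (not part of this commit) of the LUN extraction findDiskByLunWithConstraint performs on /sys/bus/scsi/devices entry names of the form host:channel:target:lun:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lunFromSCSIName parses a SCSI address like "3:0:0:1" and returns its
// LUN (the fourth field) plus whether the name had the expected shape.
func lunFromSCSIName(name string) (int, bool) {
	parts := strings.Split(name, ":")
	if len(parts) < 4 {
		return 0, false // not a host:channel:target:lun address
	}
	lun, err := strconv.Atoi(parts[3])
	if err != nil {
		return 0, false // last field is not numeric
	}
	return lun, true
}

func main() {
	fmt.Println(lunFromSCSIName("3:0:0:1")) // prints: 1 true
}
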
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,67 +17,62 @@ limitations under the License.
package azure_dd

import (
"fmt"
"os"
"path"

"github.com/Azure/azure-sdk-for-go/arm/compute"

storage "github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/golang/glog"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/azure"
"k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/keymutex"
"k8s.io/kubernetes/pkg/util/mount"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)

// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
// interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int, forceStandAlone bool) (string, error)
DeleteBlobDisk(diskUri string, wasForced bool) error

type azureDataDiskPlugin struct {
host volume.VolumeHost
volumeLocks keymutex.KeyMutex
}
CreateManagedDisk(diskName string, storageAccountType storage.SkuName, sizeGB int, tags map[string]string) (string, error)
DeleteManagedDisk(diskURI string) error

// Abstract interface to disk operations.
// azure cloud provider should implement it
type azureCloudProvider interface {
// Attaches the disk to the host machine.
AttachDisk(diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
AttachDisk(isManagedDisk bool, diskName, diskUri string, nodeName types.NodeName, lun int32, cachingMode compute.CachingTypes) error
// Detaches the disk, identified by disk name or uri, from the host machine.
DetachDiskByName(diskName, diskUri string, nodeName types.NodeName) error

// Check if a list of volumes are attached to the node with the specified NodeName
DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)

// Get the LUN number of the disk that is attached to the host
GetDiskLun(diskName, diskUri string, nodeName types.NodeName) (int32, error)
// Get the next available LUN number to attach a new VHD
GetNextDiskLun(nodeName types.NodeName) (int32, error)
// InstanceID returns the cloud provider ID of the specified instance.
InstanceID(nodeName types.NodeName) (string, error)

// Create a VHD blob
CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error)
CreateVolume(name, storageAccount string, storageAccountType storage.SkuName, location string, requestGB int) (string, string, int, error)
// Delete a VHD blob
DeleteVolume(name, uri string) error
DeleteVolume(diskURI string) error
}

type azureDataDiskPlugin struct {
host volume.VolumeHost
}

var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}

const (
azureDataDiskPluginName = "kubernetes.io/azure-disk"
)

func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}

func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
plugin.volumeLocks = keymutex.NewKeyMutex()
return nil
}

@@ -91,7 +86,7 @@ func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, err
return "", err
}

return volumeSource.DiskName, nil
return volumeSource.DataDiskURI, nil
}

func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
@@ -117,281 +112,104 @@ func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessM
}
}

func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter())
}

func (plugin *azureDataDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.Mounter, error) {
// azures used directly in a pod have a ReadOnly flag set by the pod author.
// azures used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
azure, err := getVolumeSource(spec)
// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewAttacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}
fsType := "ext4"
if azure.FSType != nil {
fsType = *azure.FSType
}
cachingMode := v1.AzureDataDiskCachingNone
if azure.CachingMode != nil {
cachingMode = *azure.CachingMode
}
readOnly := false
if azure.ReadOnly != nil {
readOnly = *azure.ReadOnly
}
diskName := azure.DiskName
diskUri := azure.DataDiskURI
return &azureDiskMounter{
azureDisk: &azureDisk{
podUID: podUID,
volName: spec.Name(),
diskName: diskName,
diskUri: diskUri,
cachingMode: cachingMode,
mounter: mounter,
plugin: plugin,
},
fsType: fsType,
readOnly: readOnly,
diskMounter: &mount.SafeFormatAndMount{Interface: plugin.host.GetMounter(), Runner: exec.New()}}, nil
}

func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}

func (plugin *azureDataDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &azureDiskUnmounter{
&azureDisk{
podUID: podUID,
volName: volName,
mounter: mounter,
return &azureDiskAttacher{
plugin: plugin,
},
cloud: azure,
}, nil
}

func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
azure, err := getCloud(plugin.host)
if err != nil {
glog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s", plugin.host.GetHostName())
return nil, err
}

return &azureDiskDetacher{
plugin: plugin,
cloud: azure,
}, nil
}

func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
azVolume := &v1.Volume{
Name: volName,

disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host)

return &azureDiskDeleter{
spec: spec,
plugin: plugin,
dataDisk: disk,
}, nil
}

func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
if len(options.PVC.Spec.AccessModes) == 0 {
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
}

return &azureDiskProvisioner{
plugin: plugin,
options: options,
}, nil
}

func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
volumeSource, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host)

return &azureDiskMounter{
plugin: plugin,
spec: spec,
options: options,
dataDisk: disk,
}, nil
}

func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
disk := makeDataDisk(volName, podUID, "", plugin.host)

return &azureDiskUnmounter{
plugin: plugin,
dataDisk: disk,
}, nil
}

func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter()
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)

if err != nil {
return nil, err
}

azureVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{
DiskName: sourceName,
DataDiskURI: sourceName,
},
},
}
return volume.NewSpecFromVolume(azVolume), nil
return volume.NewSpecFromVolume(azureVolume), nil
}

func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter()
return mount.GetMountRefs(mounter, deviceMountPath)
}

type azureDisk struct {
volName string
podUID types.UID
diskName string
diskUri string
cachingMode v1.AzureDataDiskCachingMode
mounter mount.Interface
plugin *azureDataDiskPlugin
volume.MetricsNil
}

type azureDiskMounter struct {
*azureDisk
// Filesystem type, optional.
fsType string
// Specifies whether the disk will be attached as read-only.
readOnly bool
// diskMounter provides the interface that is used to mount the actual block device.
diskMounter *mount.SafeFormatAndMount
}

var _ volume.Mounter = &azureDiskMounter{}

func (b *azureDiskMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}

// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *azureDiskMounter) CanMount() error {
return nil
}

// SetUp attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}

// SetUpAt attaches the disk and bind mounts to the volume path.
func (b *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
b.plugin.volumeLocks.LockKey(b.diskName)
defer b.plugin.volumeLocks.UnlockKey(b.diskName)

// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("DataDisk set up: %s %v %v", dir, !notMnt, err)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("IsLikelyNotMountPoint failed: %v", err)
return err
}
if !notMnt {
glog.V(4).Infof("%s is a mount point", dir)
return nil
}

globalPDPath := makeGlobalPDPath(b.plugin.host, b.diskName)

if err := os.MkdirAll(dir, 0750); err != nil {
glog.V(4).Infof("Could not create directory %s: %v", dir, err)
return err
}

// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
return err
}

if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(3).Infof("Azure disk volume %s mounted to %s", b.diskName, dir)
return nil
}

func makeGlobalPDPath(host volume.VolumeHost, volume string) string {
return path.Join(host.GetPluginDir(azureDataDiskPluginName), mount.MountsInGlobalPDPath, volume)
}

func (azure *azureDisk) GetPath() string {
name := azureDataDiskPluginName
return azure.plugin.host.GetPodVolumeDir(azure.podUID, utilstrings.EscapeQualifiedNameForDisk(name), azure.volName)
}

type azureDiskUnmounter struct {
*azureDisk
}

var _ volume.Unmounter = &azureDiskUnmounter{}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}

// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *azureDiskUnmounter) TearDownAt(dir string) error {
if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
return nil
}

notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil {
glog.Errorf("Error checking if mountpoint %s: %v", dir, err)
return err
}
if notMnt {
glog.V(2).Info("Not mountpoint, deleting")
return os.Remove(dir)
}
// lock the volume (and thus wait for any concurrent SetUpAt to finish)
c.plugin.volumeLocks.LockKey(c.diskName)
defer c.plugin.volumeLocks.UnlockKey(c.diskName)
refs, err := mount.GetMountRefs(c.mounter, dir)
if err != nil {
glog.Errorf("Error getting mountrefs for %s: %v", dir, err)
return err
}
if len(refs) == 0 {
glog.Errorf("Did not find pod-mount for %s during tear down", dir)
return fmt.Errorf("%s is not mounted", dir)
}
c.diskName = path.Base(refs[0])
glog.V(4).Infof("Found volume %s mounted to %s", c.diskName, dir)

// Unmount the bind-mount inside this pod
if err := c.mounter.Unmount(dir); err != nil {
glog.Errorf("Error unmounting dir %s %v", dir, err)
return err
}
notMnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if notMnt {
if err := os.Remove(dir); err != nil {
glog.Errorf("Error removing mountpoint %s %v", dir, err)
return err
}
}
return nil
}

func getVolumeSource(spec *volume.Spec) (*v1.AzureDiskVolumeSource, error) {
if spec.Volume != nil && spec.Volume.AzureDisk != nil {
return spec.Volume.AzureDisk, nil
}
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil {
return spec.PersistentVolume.Spec.AzureDisk, nil
}

return nil, fmt.Errorf("Spec does not reference an Azure disk volume type")
}

// Return cloud provider
func getAzureCloudProvider(cloudProvider cloudprovider.Interface) (azureCloudProvider, error) {
azureCloudProvider, ok := cloudProvider.(*azure.Cloud)
if !ok || azureCloudProvider == nil {
return nil, fmt.Errorf("Failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", cloudProvider)
}

return azureCloudProvider, nil
m := plugin.host.GetMounter()
return mount.GetMountRefs(m, deviceMountPath)
}

@@ -17,17 +17,11 @@ limitations under the License.
|
||||
package azure_dd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/arm/compute"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utiltesting "k8s.io/client-go/util/testing"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
)
|
||||
@@ -57,121 +51,5 @@ func TestCanSupport(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
const (
	fakeDiskName = "foo"
	fakeDiskUri  = "https://azure/vhds/bar.vhd"
	fakeLun      = 2
)

type fakeAzureProvider struct {
}

func (fake *fakeAzureProvider) AttachDisk(diskName, diskUri, vmName string, lun int32, cachingMode compute.CachingTypes) error {
	if diskName != fakeDiskName || diskUri != fakeDiskUri || lun != fakeLun {
		return fmt.Errorf("wrong disk")
	}
	return nil
}

func (fake *fakeAzureProvider) DetachDiskByName(diskName, diskUri, vmName string) error {
	if diskName != fakeDiskName || diskUri != fakeDiskUri {
		return fmt.Errorf("wrong disk")
	}
	return nil
}

func (fake *fakeAzureProvider) GetDiskLun(diskName, diskUri, vmName string) (int32, error) {
	return int32(fakeLun), nil
}

func (fake *fakeAzureProvider) GetNextDiskLun(vmName string) (int32, error) {
	return fakeLun, nil
}

func (fake *fakeAzureProvider) InstanceID(name string) (string, error) {
	return "localhost", nil
}

func (fake *fakeAzureProvider) CreateVolume(name, storageAccount, storageType, location string, requestGB int) (string, string, int, error) {
	return "", "", 0, fmt.Errorf("not implemented")
}

func (fake *fakeAzureProvider) DeleteVolume(name, uri string) error {
	return fmt.Errorf("not implemented")
}

func TestPlugin(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("azure_ddTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
	if err != nil {
		t.Errorf("Can't find the plugin by name")
	}
	fs := "ext4"
	ro := false
	caching := v1.AzureDataDiskCachingNone
	spec := &v1.Volume{
		Name: "vol1",
		VolumeSource: v1.VolumeSource{
			AzureDisk: &v1.AzureDiskVolumeSource{
				DiskName:    fakeDiskName,
				DataDiskURI: fakeDiskUri,
				FSType:      &fs,
				CachingMode: &caching,
				ReadOnly:    &ro,
			},
		},
	}
	mounter, err := plug.(*azureDataDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Errorf("Got a nil Mounter")
	}
	volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-disk/vol1")
	path := mounter.GetPath()
	if path != volPath {
		t.Errorf("Got unexpected path: %s, should be %s", path, volPath)
	}

	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", path)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}

	unmounter, err := plug.(*azureDataDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Errorf("Got a nil Unmounter")
	}

	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", path)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
}
// The fakeAzureProvider type was removed because none of its functions were used.
// Testing mounting would require path calculation, which depends on the cloud provider; that is faked in the test above.
184	pkg/volume/azure_dd/azure_mounter.go	Normal file
@@ -0,0 +1,184 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"fmt"
	"os"

	"github.com/golang/glog"
	"k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/volume"
	"k8s.io/kubernetes/pkg/volume/util"
)

type azureDiskMounter struct {
	*dataDisk
	spec    *volume.Spec
	plugin  *azureDataDiskPlugin
	options volume.VolumeOptions
}

type azureDiskUnmounter struct {
	*dataDisk
	plugin *azureDataDiskPlugin
}

var _ volume.Unmounter = &azureDiskUnmounter{}
var _ volume.Mounter = &azureDiskMounter{}

func (m *azureDiskMounter) GetAttributes() volume.Attributes {
	volumeSource, _ := getVolumeSource(m.spec)
	return volume.Attributes{
		ReadOnly:        *volumeSource.ReadOnly,
		Managed:         !*volumeSource.ReadOnly,
		SupportsSELinux: true,
	}
}

func (m *azureDiskMounter) CanMount() error {
	return nil
}

func (m *azureDiskMounter) SetUp(fsGroup *int64) error {
	return m.SetUpAt(m.GetPath(), fsGroup)
}

func (m *azureDiskMounter) GetPath() string {
	return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
}

func (m *azureDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
	mounter := m.plugin.host.GetMounter()
	volumeSource, err := getVolumeSource(m.spec)

	if err != nil {
		glog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
		return err
	}

	diskName := volumeSource.DiskName
	mountPoint, err := mounter.IsLikelyNotMountPoint(dir)

	if err != nil && !os.IsNotExist(err) {
		glog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
		return err
	}
	if !mountPoint {
		return fmt.Errorf("azureDisk - Not a mounting point for disk %s on %s", diskName, dir)
	}

	if err := os.MkdirAll(dir, 0750); err != nil {
		glog.Infof("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
		return err
	}

	options := []string{"bind"}

	if *volumeSource.ReadOnly {
		options = append(options, "ro")
	}

	glog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
	isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
	globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)

	if err != nil {
		return err
	}

	mountErr := mounter.Mount(globalPDPath, dir, *volumeSource.FSType, options)
	// Everything in the following control flow is meant as an
	// attempt to clean up a failed SetUpAt (bind mount)
	if mountErr != nil {
		glog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
		mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
		if err != nil {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
		}

		if !mountPoint {
			if err = mounter.Unmount(dir); err != nil {
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
			}
			mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
			if err != nil {
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
			}
			if !mountPoint {
				// not cool. leave for next sync loop.
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount(). Will try again next sync loop.", diskName, dir, mountErr)
			}
		}

		if err = os.Remove(dir); err != nil {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
		}

		glog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v, post failure clean up was completed", diskName, dir, mountErr)
		return mountErr
	}

	if !*volumeSource.ReadOnly {
		volume.SetVolumeOwnership(m, fsGroup)
	}

	glog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
	return nil
}

func (u *azureDiskUnmounter) TearDown() error {
	return u.TearDownAt(u.GetPath())
}

func (u *azureDiskUnmounter) TearDownAt(dir string) error {
	if pathExists, pathErr := util.PathExists(dir); pathErr != nil {
		return fmt.Errorf("Error checking if path exists: %v", pathErr)
	} else if !pathExists {
		glog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
		return nil
	}

	glog.V(4).Infof("azureDisk - TearDownAt: %s", dir)
	mounter := u.plugin.host.GetMounter()
	mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)
	}
	if mountPoint {
		if err := os.Remove(dir); err != nil {
			return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err)
		}
		// nothing was mounted here; the directory is gone, so we are done
		return nil
	}
	if err := mounter.Unmount(dir); err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err)
	}
	mountPoint, err = mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return fmt.Errorf("azureDisk - TearDownAt:IsLikelyNotMountPoint check failed: %v", err)
	}

	if mountPoint {
		return os.Remove(dir)
	}

	return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir")
}

func (u *azureDiskUnmounter) GetPath() string {
	return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host)
}
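
A note for readers following the new mounter: SetUpAt implements the kubelet's usual bind-mount-with-cleanup pattern. The disk is mounted once under a global path (makeGlobalPDPath) and then bind-mounted into each pod's volume directory; when the bind mount fails, the directory is unmounted if needed and removed so the next sync loop can retry from a clean state. A minimal sketch of that control flow follows, using a simplified stand-in for the kubelet Mounter interface (the interface and helper names are illustrative, not the commit's API):

package sketch

import (
	"fmt"
	"os"
)

// mounterIface is a cut-down stand-in for k8s.io/kubernetes/pkg/util/mount.Interface.
type mounterIface interface {
	Mount(source, target, fstype string, options []string) error
	Unmount(target string) error
	IsLikelyNotMountPoint(dir string) (bool, error)
}

// bindMountWithCleanup mirrors the SetUpAt control flow above: mkdir, bind
// mount, and on failure unmount (if something got mounted) and remove the
// directory so a later retry starts clean. The original mount error is what
// the caller ultimately sees.
func bindMountWithCleanup(m mounterIface, globalPDPath, dir, fsType string, readOnly bool) error {
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	options := []string{"bind"}
	if readOnly {
		options = append(options, "ro")
	}
	mountErr := m.Mount(globalPDPath, dir, fsType, options)
	if mountErr == nil {
		return nil
	}
	// Best-effort cleanup of the failed bind mount.
	if notMnt, err := m.IsLikelyNotMountPoint(dir); err == nil && !notMnt {
		_ = m.Unmount(dir)
	}
	if err := os.Remove(dir); err != nil {
		return fmt.Errorf("cleanup after failed mount: %v (original error: %v)", err, mountErr)
	}
	return mountErr
}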
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,147 +20,182 @@ import (
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilstrings "k8s.io/kubernetes/pkg/util/strings"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
|
||||
)
|
||||
|
||||
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
|
||||
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
|
||||
|
||||
type azureDiskDeleter struct {
|
||||
*azureDisk
|
||||
azureProvider azureCloudProvider
|
||||
}
|
||||
|
||||
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
|
||||
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to get azure provider")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return plugin.newDeleterInternal(spec, azure)
|
||||
}
|
||||
|
||||
func (plugin *azureDataDiskPlugin) newDeleterInternal(spec *volume.Spec, azure azureCloudProvider) (volume.Deleter, error) {
|
||||
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk == nil {
|
||||
return nil, fmt.Errorf("invalid PV spec")
|
||||
}
|
||||
diskName := spec.PersistentVolume.Spec.AzureDisk.DiskName
|
||||
diskUri := spec.PersistentVolume.Spec.AzureDisk.DataDiskURI
|
||||
return &azureDiskDeleter{
|
||||
azureDisk: &azureDisk{
|
||||
volName: spec.Name(),
|
||||
diskName: diskName,
|
||||
diskUri: diskUri,
|
||||
plugin: plugin,
|
||||
},
|
||||
azureProvider: azure,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
|
||||
azure, err := getAzureCloudProvider(plugin.host.GetCloudProvider())
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to get azure provider")
|
||||
return nil, err
|
||||
}
|
||||
if len(options.PVC.Spec.AccessModes) == 0 {
|
||||
options.PVC.Spec.AccessModes = plugin.GetAccessModes()
|
||||
}
|
||||
return plugin.newProvisionerInternal(options, azure)
|
||||
}
|
||||
|
||||
func (plugin *azureDataDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, azure azureCloudProvider) (volume.Provisioner, error) {
|
||||
return &azureDiskProvisioner{
|
||||
azureDisk: &azureDisk{
|
||||
plugin: plugin,
|
||||
},
|
||||
azureProvider: azure,
|
||||
options: options,
|
||||
}, nil
|
||||
}
|
||||
|
||||
var _ volume.Deleter = &azureDiskDeleter{}
|
||||
|
||||
func (d *azureDiskDeleter) GetPath() string {
|
||||
name := azureDataDiskPluginName
|
||||
return d.plugin.host.GetPodVolumeDir(d.podUID, utilstrings.EscapeQualifiedNameForDisk(name), d.volName)
|
||||
}
|
||||
|
||||
func (d *azureDiskDeleter) Delete() error {
|
||||
glog.V(4).Infof("deleting volume %s", d.diskUri)
|
||||
return d.azureProvider.DeleteVolume(d.diskName, d.diskUri)
|
||||
}
|
||||
|
||||
type azureDiskProvisioner struct {
|
||||
*azureDisk
|
||||
azureProvider azureCloudProvider
|
||||
plugin *azureDataDiskPlugin
|
||||
options volume.VolumeOptions
|
||||
}
|
||||
|
||||
var _ volume.Provisioner = &azureDiskProvisioner{}
|
||||
type azureDiskDeleter struct {
|
||||
*dataDisk
|
||||
spec *volume.Spec
|
||||
plugin *azureDataDiskPlugin
|
||||
}
|
||||
|
||||
func (a *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
if !volume.AccessModesContainedInAll(a.plugin.GetAccessModes(), a.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", a.options.PVC.Spec.AccessModes, a.plugin.GetAccessModes())
|
||||
var _ volume.Provisioner = &azureDiskProvisioner{}
|
||||
var _ volume.Deleter = &azureDiskDeleter{}
|
||||
|
||||
func (d *azureDiskDeleter) GetPath() string {
|
||||
return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host)
|
||||
}
|
||||
|
||||
func (d *azureDiskDeleter) Delete() error {
|
||||
volumeSource, err := getVolumeSource(d.spec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var sku, location, account string
|
||||
diskController, err := getDiskController(d.plugin.host)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wasStandAlone := (*volumeSource.Kind != v1.AzureSharedBlobDisk)
|
||||
managed := (*volumeSource.Kind == v1.AzureManagedDisk)
|
||||
|
||||
if managed {
|
||||
return diskController.DeleteManagedDisk(volumeSource.DataDiskURI)
|
||||
}
|
||||
|
||||
return diskController.DeleteBlobDisk(volumeSource.DataDiskURI, wasStandAlone)
|
||||
}
|
||||
|
||||
func (p *azureDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
|
||||
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
|
||||
}
|
||||
supportedModes := p.plugin.GetAccessModes()
|
||||
|
||||
// perform static validation first
|
||||
if p.options.PVC.Spec.Selector != nil {
|
||||
return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
|
||||
}
|
||||
|
||||
if len(p.options.PVC.Spec.AccessModes) > 1 {
|
||||
return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin")
|
||||
}
|
||||
|
||||
if len(p.options.PVC.Spec.AccessModes) == 1 {
|
||||
if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
|
||||
return nil, fmt.Errorf("AzureDisk - mode %s is not supporetd by AzureDisk plugin supported mode is %s", p.options.PVC.Spec.AccessModes[0], supportedModes)
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
location, account string
|
||||
storageAccountType, fsType string
|
||||
cachingMode v1.AzureDataDiskCachingMode
|
||||
strKind string
|
||||
err error
|
||||
)
|
||||
// maxLength = 79 - (4 for ".vhd") = 75
|
||||
name := volume.GenerateVolumeName(a.options.ClusterName, a.options.PVName, 75)
|
||||
capacity := a.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
name := volume.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
|
||||
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
requestBytes := capacity.Value()
|
||||
requestGB := int(volume.RoundUpSize(requestBytes, 1024*1024*1024))
|
||||
|
||||
// Apply ProvisionerParameters (case-insensitive). We leave validation of
|
||||
// the values to the cloud provider.
|
||||
for k, v := range a.options.Parameters {
|
||||
for k, v := range p.options.Parameters {
|
||||
switch strings.ToLower(k) {
|
||||
case "skuname":
|
||||
sku = v
|
||||
storageAccountType = v
|
||||
case "location":
|
||||
location = v
|
||||
case "storageaccount":
|
||||
account = v
|
||||
case "storageaccounttype":
|
||||
storageAccountType = v
|
||||
case "kind":
|
||||
strKind = v
|
||||
case "cachingmode":
|
||||
cachingMode = v1.AzureDataDiskCachingMode(v)
|
||||
case "fstype":
|
||||
fsType = strings.ToLower(v)
|
||||
default:
|
||||
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, a.plugin.GetPluginName())
|
||||
return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
|
||||
}
|
||||
}
|
||||
// TODO: implement c.options.ProvisionerSelector parsing
|
||||
if a.options.PVC.Spec.Selector != nil {
|
||||
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
|
||||
}
|
||||
|
||||
diskName, diskUri, sizeGB, err := a.azureProvider.CreateVolume(name, account, sku, location, requestGB)
|
||||
// normalize values
|
||||
fsType = normalizeFsType(fsType)
|
||||
skuName, err := normalizeStorageAccountType(storageAccountType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kind, err := normalizeKind(strFirstLetterToUpper(strKind))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diskController, err := getDiskController(p.plugin.host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// create disk
|
||||
diskURI := ""
|
||||
if kind == v1.AzureManagedDisk {
|
||||
diskURI, err = diskController.CreateManagedDisk(name, skuName, requestGB, *(p.options.CloudTags))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
forceStandAlone := (kind == v1.AzureDedicatedBlobDisk)
|
||||
if kind == v1.AzureDedicatedBlobDisk {
|
||||
if location != "" && account != "" {
|
||||
// use dedicated kind (by default) for compatibility
|
||||
_, diskURI, _, err = diskController.CreateVolume(name, account, skuName, location, requestGB)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if location != "" || account != "" {
|
||||
return nil, fmt.Errorf("AzureDisk - location(%s) and account(%s) must be both empty or specified for dedicated kind, only one value specified is not allowed",
|
||||
location, account)
|
||||
}
|
||||
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
diskURI, err = diskController.CreateBlobDisk(name, skuName, requestGB, forceStandAlone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pv := &v1.PersistentVolume{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: a.options.PVName,
|
||||
Name: p.options.PVName,
|
||||
Labels: map[string]string{},
|
||||
Annotations: map[string]string{
|
||||
volumehelper.VolumeDynamicallyCreatedByKey: "azure-disk-dynamic-provisioner",
|
||||
"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
|
||||
},
|
||||
},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeReclaimPolicy: a.options.PersistentVolumeReclaimPolicy,
|
||||
AccessModes: a.options.PVC.Spec.AccessModes,
|
||||
PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
|
||||
AccessModes: supportedModes,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGB)),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
AzureDisk: &v1.AzureDiskVolumeSource{
|
||||
DiskName: diskName,
|
||||
DataDiskURI: diskUri,
|
||||
CachingMode: &cachingMode,
|
||||
DiskName: name,
|
||||
DataDiskURI: diskURI,
|
||||
Kind: &kind,
|
||||
FSType: &fsType,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
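
The parameter switch above defines the contract for a StorageClass's parameters map: keys are lower-cased before matching, so skuName and skuname are equivalent, and skuname/storageaccounttype land in the same field. A small, hypothetical helper restating that convention (the names below are ours, not the commit's):

package sketch

import (
	"fmt"
	"strings"
)

// diskParams collects the options the azure-disk provisioner recognizes.
type diskParams struct {
	storageAccountType string
	location           string
	account            string
	kind               string
	cachingMode        string
	fsType             string
}

// parseDiskParams applies the same case-insensitive matching as Provision.
func parseDiskParams(in map[string]string) (*diskParams, error) {
	p := &diskParams{}
	for k, v := range in {
		switch strings.ToLower(k) {
		case "skuname", "storageaccounttype": // both spellings map to the same field
			p.storageAccountType = v
		case "location":
			p.location = v
		case "storageaccount":
			p.account = v
		case "kind":
			p.kind = v
		case "cachingmode":
			p.cachingMode = v
		case "fstype":
			p.fsType = strings.ToLower(v)
		default:
			return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
		}
	}
	return p, nil
}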
@@ -1,145 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package azure_dd

import (
	"io/ioutil"
	"os"
	"path"
	"regexp"
	"strconv"
	"strings"

	"github.com/golang/glog"
	"k8s.io/kubernetes/pkg/util/exec"
)

type ioHandler interface {
	ReadDir(dirname string) ([]os.FileInfo, error)
	WriteFile(filename string, data []byte, perm os.FileMode) error
	Readlink(name string) (string, error)
}

type osIOHandler struct{}

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return ioutil.WriteFile(filename, data, perm)
}
func (handler *osIOHandler) Readlink(name string) (string, error) {
	return os.Readlink(name)
}

// exclude those used by azure as resource and OS root in /dev/disk/azure
func listAzureDiskPath(io ioHandler) []string {
	azureDiskPath := "/dev/disk/azure/"
	var azureDiskList []string
	if dirs, err := io.ReadDir(azureDiskPath); err == nil {
		for _, f := range dirs {
			name := f.Name()
			diskPath := azureDiskPath + name
			if link, linkErr := io.Readlink(diskPath); linkErr == nil {
				sd := link[(strings.LastIndex(link, "/") + 1):]
				azureDiskList = append(azureDiskList, sd)
			}
		}
	}
	glog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
	return azureDiskList
}

// given a LUN find the VHD device path like /dev/sdd
// exclude those disks used by Azure resources and OS root
func findDiskByLun(lun int, io ioHandler, exe exec.Interface) (string, error) {
	azureDisks := listAzureDiskPath(io)
	return findDiskByLunWithConstraint(lun, io, exe, azureDisks)
}

// look for device /dev/sdX and validate it is a VHD
// return empty string if no disk is found
func findDiskByLunWithConstraint(lun int, io ioHandler, exe exec.Interface, azureDisks []string) (string, error) {
	var err error
	sys_path := "/sys/bus/scsi/devices"
	if dirs, err := io.ReadDir(sys_path); err == nil {
		for _, f := range dirs {
			name := f.Name()
			// look for path like /sys/bus/scsi/devices/3:0:0:1
			arr := strings.Split(name, ":")
			if len(arr) < 4 {
				continue
			}
			// extract LUN from the path.
			// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
			l, err := strconv.Atoi(arr[3])
			if err != nil {
				// unknown path format, continue to read the next one
				glog.Errorf("failed to parse lun from %v (%v), err %v", arr[3], name, err)
				continue
			}
			if lun == l {
				// found the matching LUN;
				// read vendor and model to ensure it is a VHD disk
				vendor := path.Join(sys_path, name, "vendor")
				model := path.Join(sys_path, name, "model")
				out, err := exe.Command("cat", vendor, model).CombinedOutput()
				if err != nil {
					glog.Errorf("failed to cat device vendor and model, err: %v", err)
					continue
				}
				matched, err := regexp.MatchString("^MSFT[ ]{0,}\nVIRTUAL DISK[ ]{0,}\n$", strings.ToUpper(string(out)))
				if err != nil || !matched {
					glog.V(4).Infof("doesn't match VHD, output %v, error %v", string(out), err)
					continue
				}
				// found a disk; validate that its name is not one of the
				// excluded Azure resource/OS disks collected above
				dir := path.Join(sys_path, name, "block")
				if dev, err := io.ReadDir(dir); err == nil {
					found := false
					for _, diskName := range azureDisks {
						glog.V(12).Infof("validating disk %q with sys disk %q", dev[0].Name(), diskName)
						if string(dev[0].Name()) == diskName {
							found = true
							break
						}
					}
					if !found {
						return "/dev/" + dev[0].Name(), nil
					}
				}
			}
		}
	}
	return "", err
}

// rescan scsi bus
func scsiHostRescan(io ioHandler) {
	scsi_path := "/sys/class/scsi_host/"
	if dirs, err := io.ReadDir(scsi_path); err == nil {
		for _, f := range dirs {
			name := scsi_path + f.Name() + "/scan"
			data := []byte("- - -")
			if err = io.WriteFile(name, data, 0666); err != nil {
				glog.Errorf("failed to rescan scsi host %s", name)
			}
		}
	} else {
		glog.Errorf("failed to read %s, err %v", scsi_path, err)
	}
}
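
The core of findDiskByLunWithConstraint is a sysfs naming convention: entries under /sys/bus/scsi/devices are named host:channel:target:lun, and the trailing field is compared against the wanted LUN. A compact, hypothetical extraction helper under that assumption (the function name is ours):

package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

// lunFromSCSIAddress returns the LUN encoded in a sysfs SCSI device name
// such as "3:0:0:1" (the last colon-separated field).
func lunFromSCSIAddress(name string) (int, error) {
	arr := strings.Split(name, ":")
	if len(arr) < 4 {
		return 0, fmt.Errorf("unexpected SCSI address %q", name)
	}
	return strconv.Atoi(arr[3])
}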
@@ -10,7 +10,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -10,7 +10,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -10,7 +10,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -12,7 +12,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -12,7 +12,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -12,7 +12,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -7,7 +7,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -11,7 +11,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -9,7 +9,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -4,7 +4,6 @@ approvers:
reviewers:
- thockin
- smarterclayton
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -7,7 +7,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -8,7 +8,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -8,7 +8,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -8,7 +8,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -9,7 +9,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -8,7 +8,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -7,7 +7,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -12,7 +12,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -10,7 +10,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -12,7 +12,6 @@ reviewers:
- deads2k
- brendandburns
- derekwaynecarr
- bprashanth
- pmorie
- saad-ali
- justinsb

@@ -10,7 +10,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- davidopp
- saad-ali

@@ -9,7 +9,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- kargakis
- justinsb

@@ -18,7 +18,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- gmarek
- erictune
- davidopp

@@ -12,6 +12,7 @@ go_library(
	srcs = [
		"clientset.go",
		"doc.go",
		"import.go",
	],
	tags = ["automanaged"],
	deps = [

19	staging/src/k8s.io/client-go/kubernetes/import.go	Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file exists to enforce this clientset's vanity import path.

package kubernetes // import "k8s.io/client-go/kubernetes"
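
The trailing comment on the package clause is Go's vanity-import enforcement: once an import comment is present, the go tool rejects builds that import the package under any path other than the canonical one. A minimal illustration with a hypothetical package (not part of this commit):

// file: example.com/mylib/mylib.go
// Building code that imports this package under any path other than
// "example.com/mylib" fails with an "expects import" error from the go tool.
package mylib // import "example.com/mylib"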
@@ -10,7 +10,6 @@ reviewers:
- mikedanese
- liggitt
- nikhiljindal
- bprashanth
- erictune
- davidopp
- pmorie

@@ -716,16 +716,21 @@ func createPD(zone string) (string, error) {
	} else if TestContext.Provider == "azure" {
		pdName := fmt.Sprintf("%s-%s", TestContext.Prefix, string(uuid.NewUUID()))
		azureCloud, err := GetAzureCloud()

		if err != nil {
			return "", err
		}

		_, diskUri, _, err := azureCloud.CreateVolume(pdName, "" /* account */, "" /* sku */, "" /* location */, 1 /* sizeGb */)
		if azureCloud.BlobDiskController == nil {
			return "", fmt.Errorf("BlobDiskController is nil, which is not expected")
		}

		diskUri, err := azureCloud.BlobDiskController.CreateBlobDisk(pdName, "standard_lrs", 1, false)
		if err != nil {
			return "", err
		}

		return diskUri, nil

	} else {
		return "", fmt.Errorf("provider does not support volume creation")
	}
@@ -770,8 +775,11 @@ func deletePD(pdName string) error {
	if err != nil {
		return err
	}
	if azureCloud.BlobDiskController == nil {
		return fmt.Errorf("BlobDiskController is nil, which is not expected")
	}
	diskName := pdName[(strings.LastIndex(pdName, "/") + 1):]
	err = azureCloud.DeleteVolume(diskName, pdName)
	err = azureCloud.BlobDiskController.DeleteBlobDisk(diskName, false)
	if err != nil {
		Logf("failed to delete Azure volume %q: %v", pdName, err)
		return err
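
Taken together, the two hunks above give the e2e framework a create/delete round trip through the cloud provider's BlobDiskController. A condensed, hypothetical sketch of that flow, with an interface stand-in for the controller (only the two calls that appear in the hunks are assumed):

package sketch

import "strings"

// blobDiskController stands in for the azureCloud.BlobDiskController field
// used by createPD and deletePD above.
type blobDiskController interface {
	CreateBlobDisk(name, storageAccountType string, sizeGB int, forceStandAlone bool) (string, error)
	DeleteBlobDisk(diskName string, wasStandAlone bool) error
}

// roundTrip creates a 1 GB standard_lrs blob disk and deletes it again.
func roundTrip(c blobDiskController, pdName string) error {
	diskURI, err := c.CreateBlobDisk(pdName, "standard_lrs", 1, false)
	if err != nil {
		return err
	}
	// deletePD receives the URI; the disk name is its last path segment.
	diskName := diskURI[strings.LastIndex(diskURI, "/")+1:]
	return c.DeleteBlobDisk(diskName, false)
}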
@@ -9,8 +9,10 @@ load(

go_test(
	name = "go_default_test",
	size = "large",
	srcs = [
		"apiserver_test.go",
		"main_test.go",
		"patch_test.go",
	],
	tags = [
@@ -1,5 +1,5 @@
/*
Copyright 2016 The Kubernetes Authors.
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,25 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package azure
package apiserver

import (
	"bytes"
	"encoding/binary"
	"testing"

	"github.com/rubiojr/go-vhd/vhd"
	"k8s.io/kubernetes/test/integration/framework"
)

const (
	vhdHeaderSize = vhd.VHD_HEADER_SIZE
)

func createVHDHeader(size uint64) ([]byte, error) {
	h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
	b := new(bytes.Buffer)
	err := binary.Write(b, binary.BigEndian, h)
	if err != nil {
		return nil, err
	}
	return b.Bytes(), nil
func TestMain(m *testing.M) {
	framework.EtcdMain(m.Run)
}
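
For context on the code this hunk removes: createVHDHeader serialized a fixed-VHD header with go-vhd so tests could stub a blob that looks like a VHD. A usage sketch under the same go-vhd API the removed code relied on (vhd.CreateFixedHeader and vhd.VHD_HEADER_SIZE); the helper name is ours:

package sketch

import (
	"bytes"
	"encoding/binary"

	"github.com/rubiojr/go-vhd/vhd"
)

// fakeVHDHeader mirrors the removed createVHDHeader: serialize the fixed-VHD
// header for a disk of the given size into a byte slice suitable for stubbing
// a test blob.
func fakeVHDHeader(sizeBytes uint64) ([]byte, error) {
	h := vhd.CreateFixedHeader(sizeBytes, &vhd.VHDOptions{})
	b := new(bytes.Buffer)
	if err := binary.Write(b, binary.BigEndian, h); err != nil {
		return nil, err
	}
	// The result should be vhd.VHD_HEADER_SIZE bytes long.
	return b.Bytes(), nil
}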