Addition of ScaleIO Kubernetes Volume Plugin
This commit implements the Kubernetes volume plugin, allowing pods to seamlessly access and use data stored on ScaleIO volumes.
This commit is contained in:
420
vendor/github.com/codedellemc/goscaleio/.gitignore
generated
vendored
Normal file
420
vendor/github.com/codedellemc/goscaleio/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,420 @@
|
||||
*.*-e
|
||||
.*-e
|
||||
*-e
|
||||
vendor/
|
||||
.build/
|
||||
.rpmbuild/
|
||||
example
|
||||
release/*
|
||||
golang-crosscompile/
|
||||
.project
|
||||
*.d
|
||||
*.out
|
||||
.gaesdk/
|
||||
|
||||
# Created by https://www.gitignore.io
|
||||
|
||||
### Windows ###
|
||||
# Windows image file caches
|
||||
Thumbs.db
|
||||
ehthumbs.db
|
||||
|
||||
# Folder config file
|
||||
Desktop.ini
|
||||
|
||||
# Recycle Bin used on file shares
|
||||
$RECYCLE.BIN/
|
||||
|
||||
# Windows Installer files
|
||||
*.cab
|
||||
*.msi
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
# Windows shortcuts
|
||||
*.lnk
|
||||
|
||||
|
||||
### OSX ###
|
||||
.DS_Store
|
||||
.AppleDouble
|
||||
.LSOverride
|
||||
|
||||
# Icon must end with two \r
|
||||
Icon
|
||||
|
||||
|
||||
# Thumbnails
|
||||
._*
|
||||
|
||||
# Files that might appear in the root of a volume
|
||||
.DocumentRevisions-V100
|
||||
.fseventsd
|
||||
.Spotlight-V100
|
||||
.TemporaryItems
|
||||
.Trashes
|
||||
.VolumeIcon.icns
|
||||
|
||||
# Directories potentially created on remote AFP share
|
||||
.AppleDB
|
||||
.AppleDesktop
|
||||
Network Trash Folder
|
||||
Temporary Items
|
||||
.apdisk
|
||||
|
||||
|
||||
### Eclipse ###
|
||||
*.pydevproject
|
||||
.metadata
|
||||
.gradle
|
||||
bin/
|
||||
tmp/
|
||||
*.tmp
|
||||
*.bak
|
||||
*.swp
|
||||
*~.nib
|
||||
local.properties
|
||||
.settings/
|
||||
.loadpath
|
||||
|
||||
# Eclipse Core
|
||||
.project
|
||||
|
||||
# External tool builders
|
||||
.externalToolBuilders/
|
||||
|
||||
# Locally stored "Eclipse launch configurations"
|
||||
*.launch
|
||||
|
||||
# CDT-specific
|
||||
.cproject
|
||||
|
||||
# JDT-specific (Eclipse Java Development Tools)
|
||||
.classpath
|
||||
|
||||
# PDT-specific
|
||||
.buildpath
|
||||
|
||||
# sbteclipse plugin
|
||||
.target
|
||||
|
||||
# TeXlipse plugin
|
||||
.texlipse
|
||||
|
||||
|
||||
### Go ###
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
|
||||
### SublimeText ###
|
||||
# cache files for sublime text
|
||||
*.tmlanguage.cache
|
||||
*.tmPreferences.cache
|
||||
*.stTheme.cache
|
||||
|
||||
# workspace files are user-specific
|
||||
*.sublime-workspace
|
||||
|
||||
# project files should be checked into the repository, unless a significant
|
||||
# proportion of contributors will probably not be using SublimeText
|
||||
# *.sublime-project
|
||||
|
||||
# sftp configuration file
|
||||
sftp-config.json
|
||||
|
||||
|
||||
### VisualStudio ###
|
||||
## Ignore Visual Studio temporary files, build results, and
|
||||
## files generated by popular Visual Studio add-ons.
|
||||
|
||||
# User-specific files
|
||||
*.suo
|
||||
*.user
|
||||
*.userosscache
|
||||
*.sln.docstates
|
||||
|
||||
# User-specific files (MonoDevelop/Xamarin Studio)
|
||||
*.userprefs
|
||||
|
||||
# Build results
|
||||
[Dd]ebug/
|
||||
[Dd]ebugPublic/
|
||||
[Rr]elease/
|
||||
[Rr]eleases/
|
||||
x64/
|
||||
x86/
|
||||
build/
|
||||
bld/
|
||||
[Bb]in/
|
||||
[Oo]bj/
|
||||
|
||||
# Visual Studio 2015 cache/options directory
|
||||
.vs/
|
||||
|
||||
# MSTest test Results
|
||||
[Tt]est[Rr]esult*/
|
||||
[Bb]uild[Ll]og.*
|
||||
|
||||
# NUNIT
|
||||
*.VisualState.xml
|
||||
TestResult.xml
|
||||
|
||||
# Build Results of an ATL Project
|
||||
[Dd]ebugPS/
|
||||
[Rr]eleasePS/
|
||||
dlldata.c
|
||||
|
||||
*_i.c
|
||||
*_p.c
|
||||
*_i.h
|
||||
*.ilk
|
||||
*.meta
|
||||
*.obj
|
||||
*.pch
|
||||
*.pdb
|
||||
*.pgc
|
||||
*.pgd
|
||||
*.rsp
|
||||
*.sbr
|
||||
*.tlb
|
||||
*.tli
|
||||
*.tlh
|
||||
*.tmp
|
||||
*.tmp_proj
|
||||
*.log
|
||||
*.vspscc
|
||||
*.vssscc
|
||||
.builds
|
||||
*.pidb
|
||||
*.svclog
|
||||
*.scc
|
||||
|
||||
# Chutzpah Test files
|
||||
_Chutzpah*
|
||||
|
||||
# Visual C++ cache files
|
||||
ipch/
|
||||
*.aps
|
||||
*.ncb
|
||||
*.opensdf
|
||||
*.sdf
|
||||
*.cachefile
|
||||
|
||||
# Visual Studio profiler
|
||||
*.psess
|
||||
*.vsp
|
||||
*.vspx
|
||||
|
||||
# TFS 2012 Local Workspace
|
||||
$tf/
|
||||
|
||||
# Guidance Automation Toolkit
|
||||
*.gpState
|
||||
|
||||
# ReSharper is a .NET coding add-in
|
||||
_ReSharper*/
|
||||
*.[Rr]e[Ss]harper
|
||||
*.DotSettings.user
|
||||
|
||||
# JustCode is a .NET coding addin-in
|
||||
.JustCode
|
||||
|
||||
# TeamCity is a build add-in
|
||||
_TeamCity*
|
||||
|
||||
# DotCover is a Code Coverage Tool
|
||||
*.dotCover
|
||||
|
||||
# NCrunch
|
||||
_NCrunch_*
|
||||
.*crunch*.local.xml
|
||||
|
||||
# MightyMoose
|
||||
*.mm.*
|
||||
AutoTest.Net/
|
||||
|
||||
# Web workbench (sass)
|
||||
.sass-cache/
|
||||
|
||||
# Installshield output folder
|
||||
[Ee]xpress/
|
||||
|
||||
# DocProject is a documentation generator add-in
|
||||
DocProject/buildhelp/
|
||||
DocProject/Help/*.HxT
|
||||
DocProject/Help/*.HxC
|
||||
DocProject/Help/*.hhc
|
||||
DocProject/Help/*.hhk
|
||||
DocProject/Help/*.hhp
|
||||
DocProject/Help/Html2
|
||||
DocProject/Help/html
|
||||
|
||||
# Click-Once directory
|
||||
publish/
|
||||
|
||||
# Publish Web Output
|
||||
*.[Pp]ublish.xml
|
||||
*.azurePubxml
|
||||
# TODO: Comment the next line if you want to checkin your web deploy settings
|
||||
# but database connection strings (with potential passwords) will be unencrypted
|
||||
*.pubxml
|
||||
*.publishproj
|
||||
|
||||
# NuGet Packages
|
||||
*.nupkg
|
||||
# The packages folder can be ignored because of Package Restore
|
||||
**/packages/*
|
||||
# except build/, which is used as an MSBuild target.
|
||||
!**/packages/build/
|
||||
# Uncomment if necessary however generally it will be regenerated when needed
|
||||
#!**/packages/repositories.config
|
||||
|
||||
# Windows Azure Build Output
|
||||
csx/
|
||||
*.build.csdef
|
||||
|
||||
# Windows Store app package directory
|
||||
AppPackages/
|
||||
|
||||
# Others
|
||||
*.[Cc]ache
|
||||
ClientBin/
|
||||
[Ss]tyle[Cc]op.*
|
||||
~$*
|
||||
*~
|
||||
*.dbmdl
|
||||
*.dbproj.schemaview
|
||||
*.pfx
|
||||
*.publishsettings
|
||||
node_modules/
|
||||
bower_components/
|
||||
|
||||
# RIA/Silverlight projects
|
||||
Generated_Code/
|
||||
|
||||
# Backup & report files from converting an old project file
|
||||
# to a newer Visual Studio version. Backup files are not needed,
|
||||
# because we have git ;-)
|
||||
_UpgradeReport_Files/
|
||||
Backup*/
|
||||
UpgradeLog*.XML
|
||||
UpgradeLog*.htm
|
||||
|
||||
# SQL Server files
|
||||
*.mdf
|
||||
*.ldf
|
||||
|
||||
# Business Intelligence projects
|
||||
*.rdl.data
|
||||
*.bim.layout
|
||||
*.bim_*.settings
|
||||
|
||||
# Microsoft Fakes
|
||||
FakesAssemblies/
|
||||
|
||||
# Node.js Tools for Visual Studio
|
||||
.ntvs_analysis.dat
|
||||
|
||||
# Visual Studio 6 build log
|
||||
*.plg
|
||||
|
||||
# Visual Studio 6 workspace options file
|
||||
*.opt
|
||||
|
||||
|
||||
### Maven ###
|
||||
target/
|
||||
pom.xml.tag
|
||||
pom.xml.releaseBackup
|
||||
pom.xml.versionsBackup
|
||||
pom.xml.next
|
||||
release.properties
|
||||
dependency-reduced-pom.xml
|
||||
buildNumber.properties
|
||||
|
||||
|
||||
### Java ###
|
||||
*.class
|
||||
|
||||
# Mobile Tools for Java (J2ME)
|
||||
.mtj.tmp/
|
||||
|
||||
# Package Files #
|
||||
*.jar
|
||||
*.war
|
||||
*.ear
|
||||
|
||||
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
|
||||
hs_err_pid*
|
||||
|
||||
|
||||
### Intellij ###
|
||||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm
|
||||
|
||||
*.iml
|
||||
|
||||
## Directory-based project format:
|
||||
.idea/
|
||||
# if you remove the above rule, at least ignore the following:
|
||||
|
||||
# User-specific stuff:
|
||||
# .idea/workspace.xml
|
||||
# .idea/tasks.xml
|
||||
# .idea/dictionaries
|
||||
|
||||
# Sensitive or high-churn files:
|
||||
# .idea/dataSources.ids
|
||||
# .idea/dataSources.xml
|
||||
# .idea/sqlDataSources.xml
|
||||
# .idea/dynamic.xml
|
||||
# .idea/uiDesigner.xml
|
||||
|
||||
# Gradle:
|
||||
# .idea/gradle.xml
|
||||
# .idea/libraries
|
||||
|
||||
# Mongo Explorer plugin:
|
||||
# .idea/mongoSettings.xml
|
||||
|
||||
## File-based project format:
|
||||
*.ipr
|
||||
*.iws
|
||||
|
||||
## Plugin-specific files:
|
||||
|
||||
# IntelliJ
|
||||
/out/
|
||||
|
||||
# mpeltonen/sbt-idea plugin
|
||||
.idea_modules/
|
||||
|
||||
# JIRA plugin
|
||||
atlassian-ide-plugin.xml
|
||||
|
||||
# Crashlytics plugin (for Android Studio and IntelliJ)
|
||||
com_crashlytics_export_strings.xml
|
||||
crashlytics.properties
|
||||
crashlytics-build.properties
|
10
vendor/github.com/codedellemc/goscaleio/.travis.yml
generated
vendored
Normal file
10
vendor/github.com/codedellemc/goscaleio/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
language: go
|
||||
|
||||
install:
|
||||
- go get -t ./...
|
||||
- go get code.google.com/p/go.tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- PATH="$HOME/gopath/bin:$PATH"
|
||||
- script/coverage --coveralls
|
202
vendor/github.com/codedellemc/goscaleio/LICENSE
generated
vendored
Normal file
202
vendor/github.com/codedellemc/goscaleio/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
80
vendor/github.com/codedellemc/goscaleio/README.md
generated
vendored
Normal file
80
vendor/github.com/codedellemc/goscaleio/README.md
generated
vendored
Normal file
@@ -0,0 +1,80 @@
|
||||
# Goscaleio
|
||||
The *Goscaleio* project represents API bindings that can be used to provide ScaleIO functionality into other Go applications.
|
||||
|
||||
|
||||
- [Current State](#state)
|
||||
- [Usage](#usage)
|
||||
- [Licensing](#licensing)
|
||||
- [Support](#support)
|
||||
|
||||
## Use Cases
|
||||
Any application written in Go can take advantage of these bindings. In particular, tools involved in monitoring, management, and infrastructure as code would find these bindings relevant.
|
||||
|
||||
|
||||
## <a id="state">Current State</a>
|
||||
Early build-out and pre-documentation stages. The basics around authentication and object models are there.
|
||||
|
||||
|
||||
## <a id="usage">Usage</a>
|
||||
|
||||
### Logging in
|
||||
|
||||
client, err := goscaleio.NewClient()
|
||||
if err != nil {
|
||||
log.Fatalf("err: %v", err)
|
||||
}
|
||||
|
||||
_, err = client.Authenticate(&goscaleio.ConfigConnect{endpoint, username, password})
|
||||
if err != nil {
|
||||
log.Fatalf("error authenticating: %v", err)
|
||||
}
|
||||
|
||||
fmt.Println("Successfully logged in to ScaleIO Gateway at", client.SIOEndpoint.String())
|
||||
|
||||
|
||||
### Reusing the authentication token
|
||||
Once a client struct is created via the ```NewClient()``` function, you can replace the ```Token``` with the saved token.
|
||||
|
||||
client, err := goscaleio.NewClient()
|
||||
if err != nil {
|
||||
log.Fatalf("error with NewClient: %s", err)
|
||||
}
|
||||
|
||||
client.Token = oldToken
|
||||
|
||||
### Get Systems
|
||||
Retrieving systems is the first step after authentication which enables you to work with other necessary methods.
|
||||
|
||||
#### All Systems
|
||||
|
||||
systems, err := client.GetInstance()
|
||||
if err != nil {
|
||||
log.Fatalf("err: problem getting instance %v", err)
|
||||
}
|
||||
|
||||
#### Find a System
|
||||
|
||||
system, err := client.FindSystem(systemid,"","")
|
||||
if err != nil {
|
||||
log.Fatalf("err: problem getting instance %v", err)
|
||||
}
|
||||
|
||||
|
||||
### Get Protection Domains
|
||||
Once you have a ```System``` struct you can then get other things like ```Protection Domains```.
|
||||
|
||||
protectiondomains, err := system.GetProtectionDomain()
|
||||
if err != nil {
|
||||
log.Fatalf("error getting protection domains: %v", err)
|
||||
}
|
||||
|
||||
|
||||
<a id="licensing">Licensing</a>
|
||||
---------
|
||||
Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License. You may obtain a copy of the License at <http://www.apache.org/licenses/LICENSE-2.0>
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
|
||||
|
||||
<a id="support">Support</a>
|
||||
-------
|
||||
Please file bugs and issues at the Github issues page. For more general discussions you can contact the EMC Code team at <a href="https://groups.google.com/forum/#!forum/emccode-users">Google Groups</a> or tagged with **EMC** on <a href="https://stackoverflow.com">Stackoverflow.com</a>. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community driven process.
|
401
vendor/github.com/codedellemc/goscaleio/api.go
generated
vendored
Normal file
401
vendor/github.com/codedellemc/goscaleio/api.go
generated
vendored
Normal file
@@ -0,0 +1,401 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
log "github.com/Sirupsen/logrus"
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// Client is a ScaleIO API client. It holds the session token, the
// gateway endpoint, and the HTTP client used for every request.
type Client struct {
	Token         string         // session token returned by the /api/login call
	SIOEndpoint   url.URL        // base URL of the ScaleIO gateway
	Http          http.Client    // underlying HTTP client used for all requests
	Insecure      string         // NOTE(review): string-typed flag, presumably "true"/"false" — confirm with callers
	ShowBody      bool           // when true, request/response bodies are logged at debug level
	configConnect *ConfigConnect // saved credentials/version, used for transparent re-authentication
}
|
||||
|
||||
// Cluster is the value returned by Authenticate. It currently carries
// no fields and acts as a placeholder result type.
type Cluster struct {
}
|
||||
|
||||
// ConfigConnect holds the parameters needed to connect and authenticate
// to a ScaleIO gateway.
type ConfigConnect struct {
	Endpoint string // gateway URL
	Version  string // ScaleIO API version; discovered automatically when left empty
	Username string // gateway user name
	Password string // gateway password
}
|
||||
|
||||
// ClientPersistent bundles a Client together with the connection
// configuration used to create it, so a session can be re-established.
type ClientPersistent struct {
	configConnect *ConfigConnect
	client        *Client
}
|
||||
|
||||
func (client *Client) getVersion() (string, error) {
|
||||
endpoint := client.SIOEndpoint
|
||||
endpoint.Path = "/api/version"
|
||||
|
||||
req := client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth("", client.Token)
|
||||
|
||||
resp, err := client.retryCheckResp(&client.Http, req)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
version := string(bs)
|
||||
|
||||
if client.ShowBody {
|
||||
log.WithField("body", version).Debug(
|
||||
"printing version message body")
|
||||
}
|
||||
|
||||
version = strings.TrimRight(version, `"`)
|
||||
version = strings.TrimLeft(version, `"`)
|
||||
|
||||
versionRX := regexp.MustCompile(`^(\d+?\.\d+?).*$`)
|
||||
if m := versionRX.FindStringSubmatch(version); len(m) > 0 {
|
||||
return m[1], nil
|
||||
}
|
||||
return version, nil
|
||||
}
|
||||
|
||||
func (client *Client) updateVersion() error {
|
||||
|
||||
version, err := client.getVersion()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client.configConnect.Version = version
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *Client) Authenticate(configConnect *ConfigConnect) (Cluster, error) {
|
||||
|
||||
configConnect.Version = client.configConnect.Version
|
||||
client.configConnect = configConnect
|
||||
|
||||
endpoint := client.SIOEndpoint
|
||||
endpoint.Path += "/login"
|
||||
|
||||
req := client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth(configConnect.Username, configConnect.Password)
|
||||
|
||||
httpClient := &client.Http
|
||||
resp, errBody, err := client.checkResp(httpClient.Do(req))
|
||||
if errBody == nil && err != nil {
|
||||
return Cluster{}, err
|
||||
} else if errBody != nil && err != nil {
|
||||
if resp == nil {
|
||||
return Cluster{}, errors.New("Problem getting response from endpoint")
|
||||
}
|
||||
return Cluster{}, errors.New(errBody.Message)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return Cluster{}, errors.New("error reading body")
|
||||
}
|
||||
|
||||
token := string(bs)
|
||||
|
||||
if client.ShowBody {
|
||||
log.WithField("body", token).Debug(
|
||||
"printing authentication message body")
|
||||
}
|
||||
|
||||
token = strings.TrimRight(token, `"`)
|
||||
token = strings.TrimLeft(token, `"`)
|
||||
client.Token = token
|
||||
|
||||
if client.configConnect.Version == "" {
|
||||
err = client.updateVersion()
|
||||
if err != nil {
|
||||
return Cluster{}, errors.New("error getting version of ScaleIO")
|
||||
}
|
||||
}
|
||||
|
||||
return Cluster{}, nil
|
||||
}
|
||||
|
||||
//https://github.com/chrislusf/teeproxy/blob/master/teeproxy.go
|
||||
type nopCloser struct {
|
||||
io.Reader
|
||||
}
|
||||
|
||||
func (nopCloser) Close() error { return nil }
|
||||
|
||||
func DuplicateRequest(request *http.Request) (request1 *http.Request, request2 *http.Request) {
|
||||
request1 = &http.Request{
|
||||
Method: request.Method,
|
||||
URL: request.URL,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: request.Header,
|
||||
Host: request.Host,
|
||||
ContentLength: request.ContentLength,
|
||||
}
|
||||
request2 = &http.Request{
|
||||
Method: request.Method,
|
||||
URL: request.URL,
|
||||
Proto: "HTTP/1.1",
|
||||
ProtoMajor: 1,
|
||||
ProtoMinor: 1,
|
||||
Header: request.Header,
|
||||
Host: request.Host,
|
||||
ContentLength: request.ContentLength,
|
||||
}
|
||||
|
||||
if request.Body != nil {
|
||||
b1 := new(bytes.Buffer)
|
||||
b2 := new(bytes.Buffer)
|
||||
w := io.MultiWriter(b1, b2)
|
||||
io.Copy(w, request.Body)
|
||||
request1.Body = nopCloser{b1}
|
||||
request2.Body = nopCloser{b2}
|
||||
|
||||
defer request.Body.Close()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (client *Client) retryCheckResp(httpClient *http.Client, req *http.Request) (*http.Response, error) {
|
||||
|
||||
req1, req2 := DuplicateRequest(req)
|
||||
resp, errBody, err := client.checkResp(httpClient.Do(req1))
|
||||
if errBody == nil && err != nil {
|
||||
return &http.Response{}, err
|
||||
} else if errBody != nil && err != nil {
|
||||
if resp == nil {
|
||||
return nil, errors.New("Problem getting response from endpoint")
|
||||
}
|
||||
|
||||
if resp.StatusCode == 401 && errBody.MajorErrorCode == 0 {
|
||||
_, err := client.Authenticate(client.configConnect)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error re-authenticating: %s", err)
|
||||
}
|
||||
|
||||
ioutil.ReadAll(resp.Body)
|
||||
resp.Body.Close()
|
||||
|
||||
req2.SetBasicAuth("", client.Token)
|
||||
resp, errBody, err = client.checkResp(httpClient.Do(req2))
|
||||
if err != nil {
|
||||
return &http.Response{}, errors.New(errBody.Message)
|
||||
}
|
||||
} else {
|
||||
return &http.Response{}, errors.New(errBody.Message)
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (client *Client) checkResp(resp *http.Response, err error) (*http.Response, *types.Error, error) {
|
||||
if err != nil {
|
||||
return resp, &types.Error{}, err
|
||||
}
|
||||
|
||||
switch i := resp.StatusCode; {
|
||||
// Valid request, return the response.
|
||||
case i == 200 || i == 201 || i == 202 || i == 204:
|
||||
return resp, &types.Error{}, nil
|
||||
// Invalid request, parse the XML error returned and return it.
|
||||
case i == 400 || i == 401 || i == 403 || i == 404 || i == 405 || i == 406 || i == 409 || i == 415 || i == 500 || i == 503 || i == 504:
|
||||
errBody, err := client.parseErr(resp)
|
||||
return resp, errBody, err
|
||||
// Unhandled response.
|
||||
default:
|
||||
return nil, &types.Error{}, fmt.Errorf("unhandled API response, please report this issue, status code: %s", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
func (client *Client) decodeBody(resp *http.Response, out interface{}) error {
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if client.ShowBody {
|
||||
var prettyJSON bytes.Buffer
|
||||
_ = json.Indent(&prettyJSON, body, "", " ")
|
||||
log.WithField("body", prettyJSON.String()).Debug(
|
||||
"print decoded body")
|
||||
}
|
||||
|
||||
if err = json.Unmarshal(body, &out); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *Client) parseErr(resp *http.Response) (*types.Error, error) {
|
||||
|
||||
errBody := new(types.Error)
|
||||
|
||||
// if there was an error decoding the body, just return that
|
||||
if err := client.decodeBody(resp, errBody); err != nil {
|
||||
return &types.Error{}, fmt.Errorf("error parsing error body for non-200 request: %s", err)
|
||||
}
|
||||
|
||||
return errBody, fmt.Errorf("API (%d) Error: %d: %s", resp.StatusCode, errBody.MajorErrorCode, errBody.Message)
|
||||
}
|
||||
|
||||
func (c *Client) NewRequest(params map[string]string, method string, u url.URL, body io.Reader) *http.Request {
|
||||
|
||||
if log.GetLevel() == log.DebugLevel && c.ShowBody && body != nil {
|
||||
buf := new(bytes.Buffer)
|
||||
buf.ReadFrom(body)
|
||||
log.WithField("body", buf.String()).Debug("print new request body")
|
||||
}
|
||||
|
||||
p := url.Values{}
|
||||
|
||||
for k, v := range params {
|
||||
p.Add(k, v)
|
||||
}
|
||||
|
||||
u.RawQuery = p.Encode()
|
||||
|
||||
req, _ := http.NewRequest(method, u.String(), body)
|
||||
|
||||
return req
|
||||
|
||||
}
|
||||
|
||||
func NewClient() (client *Client, err error) {
|
||||
return NewClientWithArgs(
|
||||
os.Getenv("GOSCALEIO_ENDPOINT"),
|
||||
os.Getenv("GOSCALEIO_VERSION"),
|
||||
os.Getenv("GOSCALEIO_INSECURE") == "true",
|
||||
os.Getenv("GOSCALEIO_USECERTS") == "true")
|
||||
}
|
||||
|
||||
func NewClientWithArgs(
|
||||
endpoint string,
|
||||
version string,
|
||||
insecure,
|
||||
useCerts bool) (client *Client, err error) {
|
||||
|
||||
fields := map[string]interface{}{
|
||||
"endpoint": endpoint,
|
||||
"insecure": insecure,
|
||||
"useCerts": useCerts,
|
||||
"version": version,
|
||||
}
|
||||
|
||||
var uri *url.URL
|
||||
|
||||
if endpoint != "" {
|
||||
uri, err = url.ParseRequestURI(endpoint)
|
||||
if err != nil {
|
||||
return &Client{},
|
||||
withFieldsE(fields, "error parsing endpoint", err)
|
||||
}
|
||||
} else {
|
||||
return &Client{},
|
||||
withFields(fields, "endpoint is required")
|
||||
}
|
||||
|
||||
client = &Client{
|
||||
SIOEndpoint: *uri,
|
||||
Http: http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSHandshakeTimeout: 120 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: insecure,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if useCerts {
|
||||
pool := x509.NewCertPool()
|
||||
pool.AppendCertsFromPEM(pemCerts)
|
||||
|
||||
client.Http.Transport = &http.Transport{
|
||||
TLSHandshakeTimeout: 120 * time.Second,
|
||||
TLSClientConfig: &tls.Config{
|
||||
RootCAs: pool,
|
||||
InsecureSkipVerify: insecure,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
client.configConnect = &ConfigConnect{
|
||||
Version: version,
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
||||
|
||||
func GetLink(links []*types.Link, rel string) (*types.Link, error) {
|
||||
for _, link := range links {
|
||||
if link.Rel == rel {
|
||||
return link, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &types.Link{}, errors.New("Couldn't find link")
|
||||
}
|
||||
|
||||
func withFields(fields map[string]interface{}, message string) error {
|
||||
return withFieldsE(fields, message, nil)
|
||||
}
|
||||
|
||||
// withFieldsE builds an error whose message is `message` followed by a
// comma-separated "k=v" rendering of fields; a non-nil inner error is
// folded in under the "inner" key.
// NOTE(review): Go map iteration order is random, so the field order in
// the resulting message is nondeterministic between calls — confirm no
// caller parses or compares these messages.
func withFieldsE(
	fields map[string]interface{}, message string, inner error) error {

	if fields == nil {
		fields = make(map[string]interface{})
	}

	if inner != nil {
		fields["inner"] = inner
	}

	x := 0
	l := len(fields)

	var b bytes.Buffer
	for k, v := range fields {
		// Comma-separate every pair except the last.
		if x < l-1 {
			b.WriteString(fmt.Sprintf("%s=%v,", k, v))
		} else {
			b.WriteString(fmt.Sprintf("%s=%v", k, v))
		}
		x = x + 1
	}

	return newf("%s %s", message, b.String())
}
|
||||
|
||||
// newf formats according to format and returns the result as an error.
// The variadic arguments must be expanded with `a...`; the previous
// implementation passed the slice `a` as a single operand, so every
// message rendered as "[args] %!s(MISSING)"-style garbage.
func newf(format string, a ...interface{}) error {
	return fmt.Errorf(format, a...)
}
|
4232
vendor/github.com/codedellemc/goscaleio/certs.go
generated
vendored
Normal file
4232
vendor/github.com/codedellemc/goscaleio/certs.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
110
vendor/github.com/codedellemc/goscaleio/device.go
generated
vendored
Normal file
110
vendor/github.com/codedellemc/goscaleio/device.go
generated
vendored
Normal file
@@ -0,0 +1,110 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// Device pairs a ScaleIO device model with the API client used to issue
// follow-up requests about it.
type Device struct {
	Device *types.Device // device attributes as returned by the API
	client *Client       // gateway client for subsequent calls
}
|
||||
|
||||
// NewDevice returns a Device wrapper holding an empty model bound to client.
func NewDevice(client *Client) *Device {
	return &Device{
		Device: new(types.Device),
		client: client,
	}
}
|
||||
|
||||
// NewDeviceEx returns a Device wrapper around an existing device model.
func NewDeviceEx(client *Client, device *types.Device) *Device {
	return &Device{
		Device: device,
		client: client,
	}
}
|
||||
|
||||
func (storagePool *StoragePool) AttachDevice(path string, sdsID string) (string, error) {
|
||||
endpoint := storagePool.client.SIOEndpoint
|
||||
|
||||
deviceParam := &types.DeviceParam{}
|
||||
deviceParam.Name = path
|
||||
deviceParam.DeviceCurrentPathname = path
|
||||
deviceParam.StoragePoolID = storagePool.StoragePool.ID
|
||||
deviceParam.SdsID = sdsID
|
||||
deviceParam.TestMode = "testAndActivate"
|
||||
|
||||
jsonOutput, err := json.Marshal(&deviceParam)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("/api/types/Device/instances")
|
||||
|
||||
req := storagePool.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", storagePool.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
|
||||
resp, err := storagePool.client.retryCheckResp(&storagePool.client.Http, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
var dev types.DeviceResp
|
||||
err = json.Unmarshal(bs, &dev)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return dev.ID, nil
|
||||
}
|
||||
|
||||
// GetDevice lists the devices attached to this storage pool.
func (storagePool *StoragePool) GetDevice() (devices []types.Device, err error) {
	endpoint := storagePool.client.SIOEndpoint
	endpoint.Path = fmt.Sprintf("/api/instances/StoragePool::%v/relationships/Device", storagePool.StoragePool.ID)

	req := storagePool.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", storagePool.client.Token)
	req.Header.Add("Accept", "application/json;version="+storagePool.client.configConnect.Version)

	resp, err := storagePool.client.retryCheckResp(&storagePool.client.Http, req)
	if err != nil {
		return []types.Device{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if err = storagePool.client.decodeBody(resp, &devices); err != nil {
		return []types.Device{}, fmt.Errorf("error decoding instances response: %s", err)
	}

	return devices, nil
}
|
||||
|
||||
func (storagePool *StoragePool) FindDevice(field, value string) (device *types.Device, err error) {
|
||||
devices, err := storagePool.GetDevice()
|
||||
if err != nil {
|
||||
return &types.Device{}, nil
|
||||
}
|
||||
|
||||
for _, device := range devices {
|
||||
valueOf := reflect.ValueOf(device)
|
||||
switch {
|
||||
case reflect.Indirect(valueOf).FieldByName(field).String() == value:
|
||||
return &device, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &types.Device{}, errors.New("Couldn't find DEV")
|
||||
}
|
228
vendor/github.com/codedellemc/goscaleio/instance.go
generated
vendored
Normal file
228
vendor/github.com/codedellemc/goscaleio/instance.go
generated
vendored
Normal file
@@ -0,0 +1,228 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// GetInstance returns the System instances known to the gateway. With an
// empty systemhref it lists all systems; otherwise it fetches the single
// system at that href and returns it as a one-element slice.
func (client *Client) GetInstance(systemhref string) (systems []*types.System, err error) {

	endpoint := client.SIOEndpoint
	if systemhref == "" {
		// Note: appended to the endpoint's existing path (+=), unlike the
		// other getters which overwrite Path.
		endpoint.Path += "/types/System/instances"
	} else {
		endpoint.Path = systemhref
	}

	req := client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", client.Token)
	req.Header.Add("Accept", "application/json;version="+client.configConnect.Version)

	resp, err := client.retryCheckResp(&client.Http, req)
	if err != nil {
		return []*types.System{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	// A list request decodes into a slice; a direct href yields one object.
	if systemhref == "" {
		if err = client.decodeBody(resp, &systems); err != nil {
			return []*types.System{}, fmt.Errorf("error decoding instances response: %s", err)
		}
	} else {
		system := &types.System{}
		if err = client.decodeBody(resp, &system); err != nil {
			return []*types.System{}, fmt.Errorf("error decoding instances response: %s", err)
		}
		systems = append(systems, system)
	}

	// bs, err := ioutil.ReadAll(resp.Body)
	// if err != nil {
	//   return types.Systems{}, errors.New("error reading body")
	// }

	return systems, nil
}
|
||||
|
||||
// GetVolume looks volumes up by href, ID, or name. A name is first
// resolved to an ID via FindVolumeID; a "Not found" resolution returns
// (nil, nil) rather than an error. When neither an href nor an ID is
// given, all volumes are listed and filtered client-side: with
// getSnapshots set, only snapshots (non-empty AncestorVolumeID) are kept;
// otherwise only volumes whose AncestorVolumeID equals ancestorvolumeid.
func (client *Client) GetVolume(volumehref, volumeid, ancestorvolumeid, volumename string, getSnapshots bool) (volumes []*types.Volume, err error) {

	endpoint := client.SIOEndpoint

	if volumename != "" {
		volumeid, err = client.FindVolumeID(volumename)
		if err != nil && err.Error() == "Not found" {
			// A missing named volume is not an error for callers.
			return nil, nil
		}
		if err != nil {
			return []*types.Volume{}, fmt.Errorf("Error: problem finding volume: %s", err)
		}
	}

	// Pick the most specific endpoint available: ID, full list, or raw href.
	if volumeid != "" {
		endpoint.Path = fmt.Sprintf("/api/instances/Volume::%s", volumeid)
	} else if volumehref == "" {
		endpoint.Path = "/api/types/Volume/instances"
	} else {
		endpoint.Path = volumehref
	}

	req := client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	req.SetBasicAuth("", client.Token)
	req.Header.Add("Accept", "application/json;version="+client.configConnect.Version)

	resp, err := client.retryCheckResp(&client.Http, req)
	if err != nil {
		return []*types.Volume{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if volumehref == "" && volumeid == "" {
		if err = client.decodeBody(resp, &volumes); err != nil {
			return []*types.Volume{}, fmt.Errorf("error decoding storage pool response: %s", err)
		}
		// Client-side snapshot/ancestor filtering of the full listing.
		var volumesNew []*types.Volume
		for _, volume := range volumes {
			if (!getSnapshots && volume.AncestorVolumeID == ancestorvolumeid) || (getSnapshots && volume.AncestorVolumeID != "") {
				volumesNew = append(volumesNew, volume)
			}
		}
		volumes = volumesNew
	} else {
		// Single-object responses decode into one Volume.
		volume := &types.Volume{}
		if err = client.decodeBody(resp, &volume); err != nil {
			return []*types.Volume{}, fmt.Errorf("error decoding instances response: %s", err)
		}
		volumes = append(volumes, volume)
	}
	return volumes, nil
}
|
||||
|
||||
func (client *Client) FindVolumeID(volumename string) (volumeID string, err error) {
|
||||
|
||||
endpoint := client.SIOEndpoint
|
||||
|
||||
volumeQeryIdByKeyParam := &types.VolumeQeryIdByKeyParam{}
|
||||
volumeQeryIdByKeyParam.Name = volumename
|
||||
|
||||
jsonOutput, err := json.Marshal(&volumeQeryIdByKeyParam)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("/api/types/Volume/instances/action/queryIdByKey")
|
||||
|
||||
req := client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+client.configConnect.Version)
|
||||
|
||||
resp, err := client.retryCheckResp(&client.Http, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
volumeID = string(bs)
|
||||
|
||||
volumeID = strings.TrimRight(volumeID, `"`)
|
||||
volumeID = strings.TrimLeft(volumeID, `"`)
|
||||
|
||||
return volumeID, nil
|
||||
}
|
||||
|
||||
// CreateVolume creates volume in the storage pool named storagePoolName
// and returns the API response carrying the new volume's ID.
func (client *Client) CreateVolume(volume *types.VolumeParam, storagePoolName string) (volumeResp *types.VolumeResp, err error) {

	endpoint := client.SIOEndpoint

	endpoint.Path = "/api/types/Volume/instances"

	// Resolve the pool by name to fill in the IDs the API requires.
	storagePool, err := client.FindStoragePool("", storagePoolName, "")
	if err != nil {
		return nil, err
	}

	volume.StoragePoolID = storagePool.ID
	volume.ProtectionDomainID = storagePool.ProtectionDomainID

	jsonOutput, err := json.Marshal(&volume)
	if err != nil {
		return &types.VolumeResp{}, fmt.Errorf("error marshaling: %s", err)
	}

	req := client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", client.Token)
	req.Header.Add("Accept", "application/json;version="+client.configConnect.Version)
	req.Header.Add("Content-Type", "application/json;version="+client.configConnect.Version)

	resp, err := client.retryCheckResp(&client.Http, req)
	if err != nil {
		return &types.VolumeResp{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if err = client.decodeBody(resp, &volumeResp); err != nil {
		return &types.VolumeResp{}, fmt.Errorf("error decoding volume creation response: %s", err)
	}

	return volumeResp, nil
}
|
||||
|
||||
// GetStoragePool lists all storage pools, or fetches the single pool at
// storagepoolhref when one is given (returned as a one-element slice).
func (client *Client) GetStoragePool(storagepoolhref string) (storagePools []*types.StoragePool, err error) {

	endpoint := client.SIOEndpoint

	if storagepoolhref == "" {
		endpoint.Path = "/api/types/StoragePool/instances"
	} else {
		endpoint.Path = storagepoolhref
	}

	req := client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", client.Token)
	req.Header.Add("Accept", "application/json;version="+client.configConnect.Version)

	resp, err := client.retryCheckResp(&client.Http, req)
	if err != nil {
		return []*types.StoragePool{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	// List responses decode into a slice; a direct href into one object.
	if storagepoolhref == "" {
		if err = client.decodeBody(resp, &storagePools); err != nil {
			return []*types.StoragePool{}, fmt.Errorf("error decoding storage pool response: %s", err)
		}
	} else {
		storagePool := &types.StoragePool{}
		if err = client.decodeBody(resp, &storagePool); err != nil {
			return []*types.StoragePool{}, fmt.Errorf("error decoding instances response: %s", err)
		}
		storagePools = append(storagePools, storagePool)
	}
	return storagePools, nil
}
|
||||
|
||||
// FindStoragePool returns the first pool matching id or name among the
// pools fetched from href (or all pools when href is empty).
// NOTE(review): the match is an OR across id, name, and "href was given",
// so passing a non-empty href returns the first fetched pool regardless
// of id/name — confirm callers rely on this before tightening.
func (client *Client) FindStoragePool(id, name, href string) (storagePool *types.StoragePool, err error) {
	storagePools, err := client.GetStoragePool(href)
	if err != nil {
		return &types.StoragePool{}, fmt.Errorf("Error getting storage pool %s", err)
	}

	for _, storagePool = range storagePools {
		if storagePool.ID == id || storagePool.Name == name || href != "" {
			return storagePool, nil
		}
	}

	return &types.StoragePool{}, errors.New("Couldn't find storage pool")

}
|
131
vendor/github.com/codedellemc/goscaleio/protectiondomain.go
generated
vendored
Normal file
131
vendor/github.com/codedellemc/goscaleio/protectiondomain.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// ProtectionDomain pairs a ScaleIO protection-domain model with the API
// client used to issue follow-up requests about it.
type ProtectionDomain struct {
	ProtectionDomain *types.ProtectionDomain // domain attributes from the API
	client           *Client                 // gateway client for subsequent calls
}
|
||||
|
||||
// NewProtectionDomain returns a wrapper holding an empty model bound to client.
func NewProtectionDomain(client *Client) *ProtectionDomain {
	return &ProtectionDomain{
		ProtectionDomain: new(types.ProtectionDomain),
		client:           client,
	}
}
|
||||
|
||||
// NewProtectionDomainEx wraps an existing protection-domain model.
func NewProtectionDomainEx(client *Client, pd *types.ProtectionDomain) *ProtectionDomain {
	return &ProtectionDomain{
		ProtectionDomain: pd,
		client:           client,
	}
}
|
||||
|
||||
func (system *System) CreateProtectionDomain(name string) (string, error) {
|
||||
endpoint := system.client.SIOEndpoint
|
||||
|
||||
protectionDomainParam := &types.ProtectionDomainParam{}
|
||||
protectionDomainParam.Name = name
|
||||
|
||||
jsonOutput, err := json.Marshal(&protectionDomainParam)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("/api/types/ProtectionDomain/instances")
|
||||
|
||||
req := system.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", system.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+system.client.configConnect.Version)
|
||||
|
||||
resp, err := system.client.retryCheckResp(&system.client.Http, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
var pd types.ProtectionDomainResp
|
||||
err = json.Unmarshal(bs, &pd)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return pd.ID, nil
|
||||
}
|
||||
|
||||
// GetProtectionDomain lists the system's protection domains. With an empty
// href the URL is discovered from the system's HATEOAS links; otherwise
// the single domain at protectiondomainhref is fetched and returned as a
// one-element slice.
func (system *System) GetProtectionDomain(protectiondomainhref string) (protectionDomains []*types.ProtectionDomain, err error) {

	endpoint := system.client.SIOEndpoint

	if protectiondomainhref == "" {
		link, err := GetLink(system.System.Links, "/api/System/relationship/ProtectionDomain")
		if err != nil {
			return []*types.ProtectionDomain{}, errors.New("Error: problem finding link")
		}

		endpoint.Path = link.HREF
	} else {
		endpoint.Path = protectiondomainhref
	}

	req := system.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", system.client.Token)
	req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)

	resp, err := system.client.retryCheckResp(&system.client.Http, req)
	if err != nil {
		return []*types.ProtectionDomain{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	// List responses decode into a slice; a direct href into one object.
	if protectiondomainhref == "" {
		if err = system.client.decodeBody(resp, &protectionDomains); err != nil {
			return []*types.ProtectionDomain{}, fmt.Errorf("error decoding instances response: %s", err)
		}
	} else {
		protectionDomain := &types.ProtectionDomain{}
		if err = system.client.decodeBody(resp, &protectionDomain); err != nil {
			return []*types.ProtectionDomain{}, fmt.Errorf("error decoding instances response: %s", err)
		}
		protectionDomains = append(protectionDomains, protectionDomain)

	}
	//
	// bs, err := ioutil.ReadAll(resp.Body)
	// if err != nil {
	//   return []types.ProtectionDomain{}, errors.New("error reading body")
	// }
	//
	// fmt.Println(string(bs))
	// log.Fatalf("here")
	// return []types.ProtectionDomain{}, nil
	return protectionDomains, nil
}
|
||||
|
||||
// FindProtectionDomain returns the first domain matching id or name among
// the domains fetched from href (or all domains when href is empty).
// NOTE(review): like FindStoragePool, the match is an OR across id, name,
// and "href was given" — a non-empty href returns the first fetched domain.
func (system *System) FindProtectionDomain(id, name, href string) (protectionDomain *types.ProtectionDomain, err error) {
	protectionDomains, err := system.GetProtectionDomain(href)
	if err != nil {
		return &types.ProtectionDomain{}, fmt.Errorf("Error getting protection domains %s", err)
	}

	for _, protectionDomain = range protectionDomains {
		if protectionDomain.ID == id || protectionDomain.Name == name || href != "" {
			return protectionDomain, nil
		}
	}

	return &types.ProtectionDomain{}, errors.New("Couldn't find protection domain")
}
|
35
vendor/github.com/codedellemc/goscaleio/scsiinitiator.go
generated
vendored
Normal file
35
vendor/github.com/codedellemc/goscaleio/scsiinitiator.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// GetScsiInitiator lists the SCSI initiators registered with the system.
func (system *System) GetScsiInitiator() (scsiInitiators []types.ScsiInitiator, err error) {
	endpoint := system.client.SIOEndpoint
	endpoint.Path = fmt.Sprintf("/api/instances/System::%v/relationships/ScsiInitiator", system.System.ID)

	req := system.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", system.client.Token)
	req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)

	resp, err := system.client.retryCheckResp(&system.client.Http, req)
	if err != nil {
		return []types.ScsiInitiator{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if err = system.client.decodeBody(resp, &scsiInitiators); err != nil {
		return []types.ScsiInitiator{}, fmt.Errorf("error decoding instances response: %s", err)
	}

	// bs, err := ioutil.ReadAll(resp.Body)
	// if err != nil {
	//   return types.ScsiInitiator{}, errors.New("error reading body")
	// }
	//
	// log.Fatalf("here")
	// return types.ScsiInitiator{}, nil
	return scsiInitiators, nil
}
|
188
vendor/github.com/codedellemc/goscaleio/sdc.go
generated
vendored
Normal file
188
vendor/github.com/codedellemc/goscaleio/sdc.go
generated
vendored
Normal file
@@ -0,0 +1,188 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// Sdc pairs a ScaleIO data client (SDC) model with the API client used to
// issue follow-up requests about it.
type Sdc struct {
	Sdc    *types.Sdc // SDC attributes as returned by the API
	client *Client    // gateway client for subsequent calls
}
|
||||
|
||||
// NewSdc wraps an existing SDC model with the given client.
func NewSdc(client *Client, sdc *types.Sdc) *Sdc {
	return &Sdc{
		Sdc:    sdc,
		client: client,
	}
}
|
||||
|
||||
// GetSdc lists the SDCs registered with the system.
func (system *System) GetSdc() (sdcs []types.Sdc, err error) {
	endpoint := system.client.SIOEndpoint
	endpoint.Path = fmt.Sprintf("/api/instances/System::%v/relationships/Sdc", system.System.ID)

	req := system.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", system.client.Token)
	req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)

	resp, err := system.client.retryCheckResp(&system.client.Http, req)
	if err != nil {
		return []types.Sdc{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if err = system.client.decodeBody(resp, &sdcs); err != nil {
		return []types.Sdc{}, fmt.Errorf("error decoding instances response: %s", err)
	}

	// bs, err := ioutil.ReadAll(resp.Body)
	// if err != nil {
	//   return []types.Sdc{}, errors.New("error reading body")
	// }
	//
	// fmt.Println(string(bs))
	// log.Fatalf("here")
	// return []types.Sdc{}, nil
	return sdcs, nil
}
|
||||
|
||||
func (system *System) FindSdc(field, value string) (sdc *Sdc, err error) {
|
||||
sdcs, err := system.GetSdc()
|
||||
if err != nil {
|
||||
return &Sdc{}, nil
|
||||
}
|
||||
|
||||
for _, sdc := range sdcs {
|
||||
valueOf := reflect.ValueOf(sdc)
|
||||
switch {
|
||||
case reflect.Indirect(valueOf).FieldByName(field).String() == value:
|
||||
return NewSdc(system.client, &sdc), nil
|
||||
}
|
||||
}
|
||||
|
||||
return &Sdc{}, errors.New("Couldn't find SDC")
|
||||
}
|
||||
|
||||
// GetStatistics fetches the statistics object linked from this SDC.
func (sdc *Sdc) GetStatistics() (statistics *types.Statistics, err error) {
	endpoint := sdc.client.SIOEndpoint

	// The statistics URL is discovered from the SDC's HATEOAS links.
	link, err := GetLink(sdc.Sdc.Links, "/api/Sdc/relationship/Statistics")
	if err != nil {
		return &types.Statistics{}, errors.New("Error: problem finding link")
	}
	endpoint.Path = link.HREF

	req := sdc.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", sdc.client.Token)
	req.Header.Add("Accept", "application/json;version="+sdc.client.configConnect.Version)

	resp, err := sdc.client.retryCheckResp(&sdc.client.Http, req)
	if err != nil {
		return &types.Statistics{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if err = sdc.client.decodeBody(resp, &statistics); err != nil {
		return &types.Statistics{}, fmt.Errorf("error decoding instances response: %s", err)
	}

	return statistics, nil
}
|
||||
|
||||
// GetVolume lists the volumes mapped to this SDC, following the SDC's
// volume relationship link.
func (sdc *Sdc) GetVolume() (volumes []*types.Volume, err error) {
	endpoint := sdc.client.SIOEndpoint

	// The volume-list URL is discovered from the SDC's HATEOAS links.
	link, err := GetLink(sdc.Sdc.Links, "/api/Sdc/relationship/Volume")
	if err != nil {
		return []*types.Volume{}, errors.New("Error: problem finding link")
	}
	endpoint.Path = link.HREF

	req := sdc.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", sdc.client.Token)
	req.Header.Add("Accept", "application/json;version="+sdc.client.configConnect.Version)

	resp, err := sdc.client.retryCheckResp(&sdc.client.Http, req)
	if err != nil {
		return []*types.Volume{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if err = sdc.client.decodeBody(resp, &volumes); err != nil {
		return []*types.Volume{}, fmt.Errorf("error decoding instances response: %s", err)
	}

	return volumes, nil
}
|
||||
|
||||
// GetSdcLocalGUID queries the locally installed SDC kernel driver
// (drv_cfg --query_guid) for this node's GUID, e.g.
// "271bad82-08ee-44f2-a2b1-7e2787c27be1".
func GetSdcLocalGUID() (sdcGUID string, err error) {

	out, err := exec.Command("/opt/emc/scaleio/sdc/bin/drv_cfg", "--query_guid").Output()
	if err != nil {
		// Fixed: the former format string had no verb for err (a go vet
		// error — err was silently dropped) and misleadingly mentioned
		// "volumes".
		return "", fmt.Errorf("Error querying SDC GUID: %s", err)
	}

	// The tool prints the GUID followed by a newline; strip all newlines.
	sdcGUID = strings.Replace(string(out), "\n", "", -1)

	return sdcGUID, nil
}
|
||||
|
||||
func (volume *Volume) MapVolumeSdc(mapVolumeSdcParam *types.MapVolumeSdcParam) (err error) {
|
||||
endpoint := volume.client.SIOEndpoint
|
||||
|
||||
endpoint.Path = fmt.Sprintf("/api/instances/Volume::%s/action/addMappedSdc", volume.Volume.ID)
|
||||
|
||||
jsonOutput, err := json.Marshal(&mapVolumeSdcParam)
|
||||
if err != nil {
|
||||
log.Fatalf("error marshaling: %s", err)
|
||||
}
|
||||
|
||||
req := volume.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", volume.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+volume.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+volume.client.configConnect.Version)
|
||||
|
||||
resp, err := volume.client.retryCheckResp(&volume.client.Http, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// UnmapVolumeSdc removes the mapping between the volume and an SDC.
func (volume *Volume) UnmapVolumeSdc(unmapVolumeSdcParam *types.UnmapVolumeSdcParam) (err error) {
	endpoint := volume.client.SIOEndpoint

	endpoint.Path = fmt.Sprintf("/api/instances/Volume::%s/action/removeMappedSdc", volume.Volume.ID)

	jsonOutput, err := json.Marshal(&unmapVolumeSdcParam)
	if err != nil {
		return fmt.Errorf("error marshaling: %s", err)
	}

	req := volume.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
	// Session token is sent as the basic-auth password.
	req.SetBasicAuth("", volume.client.Token)
	req.Header.Add("Accept", "application/json;version="+volume.client.configConnect.Version)
	req.Header.Add("Content-Type", "application/json;version="+volume.client.configConnect.Version)

	resp, err := volume.client.retryCheckResp(&volume.client.Http, req)
	if err != nil {
		return fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	return nil
}
|
122
vendor/github.com/codedellemc/goscaleio/sds.go
generated
vendored
Normal file
122
vendor/github.com/codedellemc/goscaleio/sds.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// Sds pairs a ScaleIO storage data server (SDS) model with the API client
// used to issue follow-up requests about it.
type Sds struct {
	Sds    *types.Sds // SDS attributes as returned by the API
	client *Client    // gateway client for subsequent calls
}
|
||||
|
||||
// NewSds returns an Sds wrapper holding an empty model bound to client.
func NewSds(client *Client) *Sds {
	return &Sds{
		Sds:    new(types.Sds),
		client: client,
	}
}
|
||||
|
||||
// NewSdsEx wraps an existing SDS model with the given client.
func NewSdsEx(client *Client, sds *types.Sds) *Sds {
	return &Sds{
		Sds:    sds,
		client: client,
	}
}
|
||||
|
||||
func (protectionDomain *ProtectionDomain) CreateSds(name string, ipList []string) (string, error) {
|
||||
endpoint := protectionDomain.client.SIOEndpoint
|
||||
|
||||
sdsParam := &types.SdsParam{}
|
||||
sdsParam.Name = name
|
||||
sdsParam.ProtectionDomainID = protectionDomain.ProtectionDomain.ID
|
||||
|
||||
if len(ipList) == 0 {
|
||||
return "", fmt.Errorf("Must provide at least 1 SDS IP")
|
||||
} else if len(ipList) == 1 {
|
||||
sdsIP := types.SdsIp{IP: ipList[0], Role: "all"}
|
||||
sdsIPList := &types.SdsIpList{sdsIP}
|
||||
sdsParam.IPList = append(sdsParam.IPList, sdsIPList)
|
||||
} else if len(ipList) >= 2 {
|
||||
sdsIP1 := types.SdsIp{IP: ipList[0], Role: "sdcOnly"}
|
||||
sdsIP2 := types.SdsIp{IP: ipList[1], Role: "sdsOnly"}
|
||||
sdsIPList1 := &types.SdsIpList{sdsIP1}
|
||||
sdsIPList2 := &types.SdsIpList{sdsIP2}
|
||||
sdsParam.IPList = append(sdsParam.IPList, sdsIPList1)
|
||||
sdsParam.IPList = append(sdsParam.IPList, sdsIPList2)
|
||||
}
|
||||
|
||||
jsonOutput, err := json.Marshal(&sdsParam)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("/api/types/Sds/instances")
|
||||
|
||||
req := protectionDomain.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", protectionDomain.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+protectionDomain.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+protectionDomain.client.configConnect.Version)
|
||||
|
||||
resp, err := protectionDomain.client.retryCheckResp(&protectionDomain.client.Http, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
var sds types.SdsResp
|
||||
err = json.Unmarshal(bs, &sds)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return sds.ID, nil
|
||||
}
|
||||
|
||||
func (protectionDomain *ProtectionDomain) GetSds() (sdss []types.Sds, err error) {
|
||||
endpoint := protectionDomain.client.SIOEndpoint
|
||||
endpoint.Path = fmt.Sprintf("/api/instances/ProtectionDomain::%v/relationships/Sds", protectionDomain.ProtectionDomain.ID)
|
||||
|
||||
req := protectionDomain.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth("", protectionDomain.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+protectionDomain.client.configConnect.Version)
|
||||
|
||||
resp, err := protectionDomain.client.retryCheckResp(&protectionDomain.client.Http, req)
|
||||
if err != nil {
|
||||
return []types.Sds{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = protectionDomain.client.decodeBody(resp, &sdss); err != nil {
|
||||
return []types.Sds{}, fmt.Errorf("error decoding instances response: %s", err)
|
||||
}
|
||||
|
||||
return sdss, nil
|
||||
}
|
||||
|
||||
func (protectionDomain *ProtectionDomain) FindSds(field, value string) (sds *types.Sds, err error) {
|
||||
sdss, err := protectionDomain.GetSds()
|
||||
if err != nil {
|
||||
return &types.Sds{}, nil
|
||||
}
|
||||
|
||||
for _, sds := range sdss {
|
||||
valueOf := reflect.ValueOf(sds)
|
||||
switch {
|
||||
case reflect.Indirect(valueOf).FieldByName(field).String() == value:
|
||||
return &sds, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &types.Sds{}, errors.New("Couldn't find SDS")
|
||||
}
|
148
vendor/github.com/codedellemc/goscaleio/storagepool.go
generated
vendored
Normal file
148
vendor/github.com/codedellemc/goscaleio/storagepool.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// StoragePool pairs an API storage-pool object with the client used to reach
// the gateway.
type StoragePool struct {
	StoragePool *types.StoragePool
	client      *Client
}
|
||||
|
||||
func NewStoragePool(client *Client) *StoragePool {
|
||||
return &StoragePool{
|
||||
StoragePool: new(types.StoragePool),
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func NewStoragePoolEx(client *Client, pool *types.StoragePool) *StoragePool {
|
||||
return &StoragePool{
|
||||
StoragePool: pool,
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (protectionDomain *ProtectionDomain) CreateStoragePool(name string) (string, error) {
|
||||
endpoint := protectionDomain.client.SIOEndpoint
|
||||
|
||||
storagePoolParam := &types.StoragePoolParam{}
|
||||
storagePoolParam.Name = name
|
||||
storagePoolParam.ProtectionDomainID = protectionDomain.ProtectionDomain.ID
|
||||
|
||||
jsonOutput, err := json.Marshal(&storagePoolParam)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("/api/types/StoragePool/instances")
|
||||
|
||||
req := protectionDomain.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", protectionDomain.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+protectionDomain.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+protectionDomain.client.configConnect.Version)
|
||||
|
||||
resp, err := protectionDomain.client.retryCheckResp(&protectionDomain.client.Http, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
var sp types.StoragePoolResp
|
||||
err = json.Unmarshal(bs, &sp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return sp.ID, nil
|
||||
}
|
||||
|
||||
// GetStoragePool lists storage pools.
//
// If storagepoolhref is empty, the pools are fetched via this protection
// domain's StoragePool relationship link and decoded as a JSON array;
// otherwise the given href is fetched and decoded as a single pool object,
// returned as a one-element slice.
func (protectionDomain *ProtectionDomain) GetStoragePool(storagepoolhref string) (storagePools []*types.StoragePool, err error) {

	endpoint := protectionDomain.client.SIOEndpoint

	if storagepoolhref == "" {
		// No explicit href: resolve the relationship link from the protection
		// domain's link list.
		link, err := GetLink(protectionDomain.ProtectionDomain.Links, "/api/ProtectionDomain/relationship/StoragePool")
		if err != nil {
			return []*types.StoragePool{}, errors.New("Error: problem finding link")
		}
		endpoint.Path = link.HREF
	} else {
		endpoint.Path = storagepoolhref
	}

	req := protectionDomain.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	req.SetBasicAuth("", protectionDomain.client.Token)
	req.Header.Add("Accept", "application/json;version="+protectionDomain.client.configConnect.Version)

	resp, err := protectionDomain.client.retryCheckResp(&protectionDomain.client.Http, req)
	if err != nil {
		return []*types.StoragePool{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if storagepoolhref == "" {
		// Relationship responses carry an array of pools.
		if err = protectionDomain.client.decodeBody(resp, &storagePools); err != nil {
			return []*types.StoragePool{}, fmt.Errorf("error decoding storage pool response: %s", err)
		}
	} else {
		// A direct href returns a single pool object.
		storagePool := &types.StoragePool{}
		if err = protectionDomain.client.decodeBody(resp, &storagePool); err != nil {
			return []*types.StoragePool{}, fmt.Errorf("error decoding instances response: %s", err)
		}
		storagePools = append(storagePools, storagePool)
	}
	return storagePools, nil
}
|
||||
|
||||
func (protectionDomain *ProtectionDomain) FindStoragePool(id, name, href string) (storagePool *types.StoragePool, err error) {
|
||||
storagePools, err := protectionDomain.GetStoragePool(href)
|
||||
if err != nil {
|
||||
return &types.StoragePool{}, fmt.Errorf("Error getting protection domains %s", err)
|
||||
}
|
||||
|
||||
for _, storagePool = range storagePools {
|
||||
if storagePool.ID == id || storagePool.Name == name || href != "" {
|
||||
return storagePool, nil
|
||||
}
|
||||
}
|
||||
|
||||
return &types.StoragePool{}, errors.New("Couldn't find protection domain")
|
||||
|
||||
}
|
||||
|
||||
func (storagePool *StoragePool) GetStatistics() (statistics *types.Statistics, err error) {
|
||||
link, err := GetLink(storagePool.StoragePool.Links, "/api/StoragePool/relationship/Statistics")
|
||||
if err != nil {
|
||||
return &types.Statistics{}, errors.New("Error: problem finding link")
|
||||
}
|
||||
|
||||
endpoint := storagePool.client.SIOEndpoint
|
||||
endpoint.Path = link.HREF
|
||||
|
||||
req := storagePool.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth("", storagePool.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
|
||||
resp, err := storagePool.client.retryCheckResp(&storagePool.client.Http, req)
|
||||
if err != nil {
|
||||
return &types.Statistics{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = storagePool.client.decodeBody(resp, &statistics); err != nil {
|
||||
return &types.Statistics{}, fmt.Errorf("error decoding instances response: %s", err)
|
||||
}
|
||||
|
||||
return statistics, nil
|
||||
}
|
106
vendor/github.com/codedellemc/goscaleio/system.go
generated
vendored
Normal file
106
vendor/github.com/codedellemc/goscaleio/system.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// System pairs an API system object with the client used to reach the
// gateway.
type System struct {
	System *types.System
	client *Client
}
|
||||
|
||||
func NewSystem(client *Client) *System {
|
||||
return &System{
|
||||
System: new(types.System),
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
func (client *Client) FindSystem(instanceID, name, href string) (*System, error) {
|
||||
systems, err := client.GetInstance(href)
|
||||
if err != nil {
|
||||
return &System{}, fmt.Errorf("err: problem getting instances: %s", err)
|
||||
}
|
||||
|
||||
for _, system := range systems {
|
||||
if system.ID == instanceID || system.Name == name || href != "" {
|
||||
outSystem := NewSystem(client)
|
||||
outSystem.System = system
|
||||
return outSystem, nil
|
||||
}
|
||||
}
|
||||
return &System{}, fmt.Errorf("err: systemid or systemname not found")
|
||||
}
|
||||
|
||||
func (system *System) GetStatistics() (statistics *types.Statistics, err error) {
|
||||
endpoint := system.client.SIOEndpoint
|
||||
// endpoint.Path = fmt.Sprintf("/api/instances/System::%v/relationships/Statistics", system.System.ID)
|
||||
|
||||
link, err := GetLink(system.System.Links, "/api/System/relationship/Statistics")
|
||||
if err != nil {
|
||||
return &types.Statistics{}, errors.New("Error: problem finding link")
|
||||
}
|
||||
|
||||
endpoint.Path = link.HREF
|
||||
|
||||
req := system.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth("", system.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)
|
||||
|
||||
resp, err := system.client.retryCheckResp(&system.client.Http, req)
|
||||
if err != nil {
|
||||
return &types.Statistics{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = system.client.decodeBody(resp, &statistics); err != nil {
|
||||
return &types.Statistics{}, fmt.Errorf("error decoding instances response: %s", err)
|
||||
}
|
||||
|
||||
// bs, err := ioutil.ReadAll(resp.Body)
|
||||
// if err != nil {
|
||||
// return errors.New("error reading body")
|
||||
// }
|
||||
//
|
||||
// fmt.Println(string(bs))
|
||||
return statistics, nil
|
||||
}
|
||||
|
||||
func (system *System) CreateSnapshotConsistencyGroup(snapshotVolumesParam *types.SnapshotVolumesParam) (snapshotVolumesResp *types.SnapshotVolumesResp, err error) {
|
||||
endpoint := system.client.SIOEndpoint
|
||||
|
||||
link, err := GetLink(system.System.Links, "self")
|
||||
if err != nil {
|
||||
return &types.SnapshotVolumesResp{}, errors.New("Error: problem finding link")
|
||||
}
|
||||
|
||||
endpoint.Path = fmt.Sprintf("%v/action/snapshotVolumes", link.HREF)
|
||||
|
||||
jsonOutput, err := json.Marshal(&snapshotVolumesParam)
|
||||
if err != nil {
|
||||
return &types.SnapshotVolumesResp{}, fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
|
||||
req := system.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", system.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+system.client.configConnect.Version)
|
||||
|
||||
resp, err := system.client.retryCheckResp(&system.client.Http, req)
|
||||
if err != nil {
|
||||
return &types.SnapshotVolumesResp{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = system.client.decodeBody(resp, &snapshotVolumesResp); err != nil {
|
||||
return &types.SnapshotVolumesResp{}, fmt.Errorf("error decoding snapshotvolumes response: %s", err)
|
||||
}
|
||||
|
||||
return snapshotVolumesResp, nil
|
||||
|
||||
}
|
388
vendor/github.com/codedellemc/goscaleio/types/v1/types.go
generated
vendored
Normal file
388
vendor/github.com/codedellemc/goscaleio/types/v1/types.go
generated
vendored
Normal file
@@ -0,0 +1,388 @@
|
||||
package goscaleio
|
||||
|
||||
// Error mirrors the XML error document returned by the ScaleIO gateway.
type Error struct {
	Message                 string `xml:"message,attr"`
	MajorErrorCode          int    `xml:"majorErrorCode,attr"`
	MinorErrorCode          string `xml:"minorErrorCode,attr"`
	VendorSpecificErrorCode string `xml:"vendorSpecificErrorCode,attr,omitempty"`
	StackTrace              string `xml:"stackTrace,attr,omitempty"`
}

// type session struct {
// 	Link []*types.Link `xml:"Link"`
// }

// System describes a ScaleIO system (MDM cluster) as returned by the REST
// API.
type System struct {
	MdmMode                               string   `json:"mdmMode"`
	MdmClusterState                       string   `json:"mdmClusterState"`
	SecondaryMdmActorIPList               []string `json:"secondaryMdmActorIpList"`
	InstallID                             string   `json:"installId"`
	PrimaryActorIPList                    []string `json:"primaryMdmActorIpList"`
	SystemVersionName                     string   `json:"systemVersionName"`
	CapacityAlertHighThresholdPercent     int      `json:"capacityAlertHighThresholdPercent"`
	CapacityAlertCriticalThresholdPercent int      `json:"capacityAlertCriticalThresholdPercent"`
	RemoteReadOnlyLimitState              bool     `json:"remoteReadOnlyLimitState"`
	PrimaryMdmActorPort                   int      `json:"primaryMdmActorPort"`
	SecondaryMdmActorPort                 int      `json:"secondaryMdmActorPort"`
	TiebreakerMdmActorPort                int      `json:"tiebreakerMdmActorPort"`
	MdmManagementPort                     int      `json:"mdmManagementPort"`
	TiebreakerMdmIPList                   []string `json:"tiebreakerMdmIpList"`
	MdmManagementIPList                   []string `json:"mdmManagementIPList"`
	DefaultIsVolumeObfuscated             bool     `json:"defaultIsVolumeObfuscated"`
	RestrictedSdcModeEnabled              bool     `json:"restrictedSdcModeEnabled"`
	Swid                                  string   `json:"swid"`
	DaysInstalled                         int      `json:"daysInstalled"`
	MaxCapacityInGb                       string   `json:"maxCapacityInGb"`
	CapacityTimeLeftInDays                string   `json:"capacityTimeLeftInDays"`
	EnterpriseFeaturesEnabled             bool     `json:"enterpriseFeaturesEnabled"`
	IsInitialLicense                      bool     `json:"isInitialLicense"`
	Name                                  string   `json:"name"`
	ID                                    string   `json:"id"`
	Links                                 []*Link  `json:"links"`
}

// Link is a REST hypermedia link: a relation name plus the target href.
type Link struct {
	Rel  string `json:"rel"`
	HREF string `json:"href"`
}
|
||||
|
||||
// BWC is a bandwidth counter as reported by the ScaleIO statistics API.
type BWC struct {
	TotalWeightInKb int `json:"totalWeightInKb"`
	NumOccured      int `json:"numOccured"`
	NumSeconds      int `json:"numSeconds"`
}

// Statistics is the statistics document returned for systems and storage
// pools.
//
// BUG FIX: FwdRebuildReadBwc was previously the unexported field
// fwdRebuildReadBwc; encoding/json ignores unexported fields, so its tag had
// no effect and the value was never populated.
type Statistics struct {
	PrimaryReadFromDevBwc                    BWC `json:"primaryReadFromDevBwc"`
	NumOfStoragePools                        int `json:"numOfStoragePools"`
	ProtectedCapacityInKb                    int `json:"protectedCapacityInKb"`
	MovingCapacityInKb                       int `json:"movingCapacityInKb"`
	SnapCapacityInUseOccupiedInKb            int `json:"snapCapacityInUseOccupiedInKb"`
	SnapCapacityInUseInKb                    int `json:"snapCapacityInUseInKb"`
	ActiveFwdRebuildCapacityInKb             int `json:"activeFwdRebuildCapacityInKb"`
	DegradedHealthyVacInKb                   int `json:"degradedHealthyVacInKb"`
	ActiveMovingRebalanceJobs                int `json:"activeMovingRebalanceJobs"`
	TotalReadBwc                             BWC `json:"totalReadBwc"`
	MaxCapacityInKb                          int `json:"maxCapacityInKb"`
	PendingBckRebuildCapacityInKb            int `json:"pendingBckRebuildCapacityInKb"`
	ActiveMovingOutFwdRebuildJobs            int `json:"activeMovingOutFwdRebuildJobs"`
	CapacityLimitInKb                        int `json:"capacityLimitInKb"`
	SecondaryVacInKb                         int `json:"secondaryVacInKb"`
	PendingFwdRebuildCapacityInKb            int `json:"pendingFwdRebuildCapacityInKb"`
	ThinCapacityInUseInKb                    int `json:"thinCapacityInUseInKb"`
	AtRestCapacityInKb                       int `json:"atRestCapacityInKb"`
	ActiveMovingInBckRebuildJobs             int `json:"activeMovingInBckRebuildJobs"`
	DegradedHealthyCapacityInKb              int `json:"degradedHealthyCapacityInKb"`
	NumOfScsiInitiators                      int `json:"numOfScsiInitiators"`
	NumOfUnmappedVolumes                     int `json:"numOfUnmappedVolumes"`
	FailedCapacityInKb                       int `json:"failedCapacityInKb"`
	SecondaryReadFromDevBwc                  BWC `json:"secondaryReadFromDevBwc"`
	NumOfVolumes                             int `json:"numOfVolumes"`
	SecondaryWriteBwc                        BWC `json:"secondaryWriteBwc"`
	ActiveBckRebuildCapacityInKb             int `json:"activeBckRebuildCapacityInKb"`
	FailedVacInKb                            int `json:"failedVacInKb"`
	PendingMovingCapacityInKb                int `json:"pendingMovingCapacityInKb"`
	ActiveMovingInRebalanceJobs              int `json:"activeMovingInRebalanceJobs"`
	PendingMovingInRebalanceJobs             int `json:"pendingMovingInRebalanceJobs"`
	BckRebuildReadBwc                        BWC `json:"bckRebuildReadBwc"`
	DegradedFailedVacInKb                    int `json:"degradedFailedVacInKb"`
	NumOfSnapshots                           int `json:"numOfSnapshots"`
	RebalanceCapacityInKb                    int `json:"rebalanceCapacityInKb"`
	FwdRebuildReadBwc                        BWC `json:"fwdRebuildReadBwc"`
	NumOfSdc                                 int `json:"numOfSdc"`
	ActiveMovingInFwdRebuildJobs             int `json:"activeMovingInFwdRebuildJobs"`
	NumOfVtrees                              int `json:"numOfVtrees"`
	ThickCapacityInUseInKb                   int `json:"thickCapacityInUseInKb"`
	ProtectedVacInKb                         int `json:"protectedVacInKb"`
	PendingMovingInBckRebuildJobs            int `json:"pendingMovingInBckRebuildJobs"`
	CapacityAvailableForVolumeAllocationInKb int `json:"capacityAvailableForVolumeAllocationInKb"`
	PendingRebalanceCapacityInKb             int `json:"pendingRebalanceCapacityInKb"`
	PendingMovingRebalanceJobs               int `json:"pendingMovingRebalanceJobs"`
	NumOfProtectionDomains                   int `json:"numOfProtectionDomains"`
	NumOfSds                                 int `json:"numOfSds"`
	CapacityInUseInKb                        int `json:"capacityInUseInKb"`
	BckRebuildWriteBwc                       BWC `json:"bckRebuildWriteBwc"`
	DegradedFailedCapacityInKb               int `json:"degradedFailedCapacityInKb"`
	NumOfThinBaseVolumes                     int `json:"numOfThinBaseVolumes"`
	PendingMovingOutFwdRebuildJobs           int `json:"pendingMovingOutFwdRebuildJobs"`
	SecondaryReadBwc                         BWC `json:"secondaryReadBwc"`
	PendingMovingOutBckRebuildJobs           int `json:"pendingMovingOutBckRebuildJobs"`
	RebalanceWriteBwc                        BWC `json:"rebalanceWriteBwc"`
	PrimaryReadBwc                           BWC `json:"primaryReadBwc"`
	NumOfVolumesInDeletion                   int `json:"numOfVolumesInDeletion"`
	NumOfDevices                             int `json:"numOfDevices"`
	RebalanceReadBwc                         BWC `json:"rebalanceReadBwc"`
	InUseVacInKb                             int `json:"inUseVacInKb"`
	UnreachableUnusedCapacityInKb            int `json:"unreachableUnusedCapacityInKb"`
	TotalWriteBwc                            BWC `json:"totalWriteBwc"`
	SpareCapacityInKb                        int `json:"spareCapacityInKb"`
	ActiveMovingOutBckRebuildJobs            int `json:"activeMovingOutBckRebuildJobs"`
	PrimaryVacInKb                           int `json:"primaryVacInKb"`
	NumOfThickBaseVolumes                    int `json:"numOfThickBaseVolumes"`
	BckRebuildCapacityInKb                   int `json:"bckRebuildCapacityInKb"`
	NumOfMappedToAllVolumes                  int `json:"numOfMappedToAllVolumes"`
	ActiveMovingCapacityInKb                 int `json:"activeMovingCapacityInKb"`
	PendingMovingInFwdRebuildJobs            int `json:"pendingMovingInFwdRebuildJobs"`
	ActiveRebalanceCapacityInKb              int `json:"activeRebalanceCapacityInKb"`
	RmcacheSizeInKb                          int `json:"rmcacheSizeInKb"`
	FwdRebuildCapacityInKb                   int `json:"fwdRebuildCapacityInKb"`
	FwdRebuildWriteBwc                       BWC `json:"fwdRebuildWriteBwc"`
	PrimaryWriteBwc                          BWC `json:"primaryWriteBwc"`
}
|
||||
|
||||
// User is a gateway user account.
type User struct {
	SystemID              string  `json:"systemId"`
	UserRole              string  `json:"userRole"`
	PasswordChangeRequire bool    `json:"passwordChangeRequired"`
	Name                  string  `json:"name"`
	ID                    string  `json:"id"`
	Links                 []*Link `json:"links"`
}

// ScsiInitiator describes an iSCSI initiator registered with the system.
type ScsiInitiator struct {
	Name     string  `json:"name"`
	IQN      string  `json:"iqn"`
	SystemID string  `json:"systemID"`
	Links    []*Link `json:"links"`
}

// ProtectionDomain describes a protection domain and its throttling settings.
type ProtectionDomain struct {
	SystemID                          string  `json:"systemId"`
	RebuildNetworkThrottlingInKbps    int     `json:"rebuildNetworkThrottlingInKbps"`
	RebalanceNetworkThrottlingInKbps  int     `json:"rebalanceNetworkThrottlingInKbps"`
	OverallIoNetworkThrottlingInKbps  int     `json:"overallIoNetworkThrottlingInKbps"`
	OverallIoNetworkThrottlingEnabled bool    `json:"overallIoNetworkThrottlingEnabled"`
	RebuildNetworkThrottlingEnabled   bool    `json:"rebuildNetworkThrottlingEnabled"`
	RebalanceNetworkThrottlingEnabled bool    `json:"rebalanceNetworkThrottlingEnabled"`
	ProtectionDomainState             string  `json:"protectionDomainState"`
	Name                              string  `json:"name"`
	ID                                string  `json:"id"`
	Links                             []*Link `json:"links"`
}

// ProtectionDomainParam is the request body for creating a protection domain.
type ProtectionDomainParam struct {
	Name string `json:"name"`
}

// ProtectionDomainResp is the creation response carrying the new domain ID.
type ProtectionDomainResp struct {
	ID string `json:"id"`
}

// Sdc describes an SDC (ScaleIO Data Client) host.
type Sdc struct {
	SystemID           string  `json:"systemId"`
	SdcApproved        bool    `json:"sdcApproved"`
	SdcIp              string  `json:"SdcIp"`
	OnVmWare           bool    `json:"onVmWare"`
	SdcGuid            string  `json:"sdcGuid"`
	MdmConnectionState string  `json:"mdmConnectionState"`
	Name               string  `json:"name"`
	ID                 string  `json:"id"`
	Links              []*Link `json:"links"`
}

// SdsIp is one SDS IP address with its role (e.g. "all", "sdcOnly",
// "sdsOnly").
type SdsIp struct {
	IP   string `json:"ip"`
	Role string `json:"role"`
}

// SdsIpList wraps a single SdsIp, matching the API's nested list element.
type SdsIpList struct {
	SdsIP SdsIp `json:"SdsIp"`
}

// Sds describes an SDS (ScaleIO Data Server) as returned by the REST API.
type Sds struct {
	ID                           string       `json:"id"`
	Name                         string       `json:"name,omitempty"`
	ProtectionDomainID           string       `json:"protectionDomainId"`
	IPList                       []*SdsIpList `json:"ipList"`
	Port                         int          `json:"port,omitempty"`
	SdsState                     string       `json:"sdsState"`
	MembershipState              string       `json:"membershipState"`
	MdmConnectionState           string       `json:"mdmConnectionState"`
	DrlMode                      string       `json:"drlMode,omitempty"`
	RmcacheEnabled               bool         `json:"rmcacheEnabled,omitempty"`
	RmcacheSizeInKb              int          `json:"rmcacheSizeInKb,omitempty"`
	RmcacheFrozen                bool         `json:"rmcacheFrozen,omitempty"`
	IsOnVMware                   bool         `json:"isOnVmWare,omitempty"`
	FaultSetID                   string       `json:"faultSetId,omitempty"`
	NumOfIoBuffers               int          `json:"numOfIoBuffers,omitempty"`
	RmcacheMemoryAllocationState string       `json:"RmcacheMemoryAllocationState,omitempty"`
}

// DeviceInfo identifies a device to attach when creating an SDS.
type DeviceInfo struct {
	DevicePath    string `json:"devicePath"`
	StoragePoolID string `json:"storagePoolId"`
	DeviceName    string `json:"deviceName,omitempty"`
}
|
||||
|
||||
type SdsParam struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
IPList []*SdsIpList `json:"sdsIpList"`
|
||||
Port int `json:"sdsPort,omitempty"`
|
||||
DrlMode string `json:"drlMode,omitempty"`
|
||||
RmcacheEnabled bool `json:"rmcacheEnabled,omitempty"`
|
||||
RmcacheSizeInKb int `json:"rmcacheSizeInKb,omitempty"`
|
||||
RmcacheFrozen bool `json:"rmcacheFrozen,omitempty"`
|
||||
ProtectionDomainID string `json:"protectionDomainId"`
|
||||
FaultSetID string `json:"faultSetId,omitempty"`
|
||||
NumOfIoBuffers int `json:"numOfIoBuffers,omitempty"`
|
||||
DeviceInfoList []*DeviceInfo `json:"deviceInfoList,omitempty"`
|
||||
ForceClean bool `json:"forceClean,omitempty"`
|
||||
DeviceTestTimeSecs int `json:"deviceTestTimeSecs ,omitempty"`
|
||||
DeviceTestMode string `json:"deviceTestMode,omitempty"`
|
||||
}
|
||||
|
||||
// SdsResp is the SDS-creation response carrying the new SDS ID.
type SdsResp struct {
	ID string `json:"id"`
}

// Device describes a storage device attached to an SDS.
type Device struct {
	ID                     string `json:"id,omitempty"`
	Name                   string `json:"name,omitempty"`
	DeviceCurrentPathname  string `json:"deviceCurrentPathname"`
	DeviceOriginalPathname string `json:"deviceOriginalPathname,omitempty"`
	DeviceState            string `json:"deviceState,omitempty"`
	ErrorState             string `json:"errorState,omitempty"`
	CapacityLimitInKb      int    `json:"capacityLimitInKb,omitempty"`
	MaxCapacityInKb        int    `json:"maxCapacityInKb,omitempty"`
	StoragePoolID          string `json:"storagePoolId"`
	SdsID                  string `json:"sdsId"`
}

// DeviceParam is the request body for adding a device.
type DeviceParam struct {
	Name                  string `json:"name,omitempty"`
	DeviceCurrentPathname string `json:"deviceCurrentPathname"`
	CapacityLimitInKb     int    `json:"capacityLimitInKb,omitempty"`
	StoragePoolID         string `json:"storagePoolId"`
	SdsID                 string `json:"sdsId"`
	TestTimeSecs          int    `json:"testTimeSecs,omitempty"`
	TestMode              string `json:"testMode,omitempty"`
}

// DeviceResp is the device-creation response carrying the new device ID.
type DeviceResp struct {
	ID string `json:"id"`
}

// StoragePool describes a storage pool and its rebuild/rebalance policies.
type StoragePool struct {
	ProtectionDomainID                               string  `json:"protectionDomainId"`
	RebalanceioPriorityPolicy                        string  `json:"rebalanceIoPriorityPolicy"`
	RebuildioPriorityPolicy                          string  `json:"rebuildIoPriorityPolicy"`
	RebuildioPriorityBwLimitPerDeviceInKbps          int     `json:"rebuildIoPriorityBwLimitPerDeviceInKbps"`
	RebuildioPriorityNumOfConcurrentIosPerDevice     int     `json:"rebuildIoPriorityNumOfConcurrentIosPerDevice"`
	RebalanceioPriorityNumOfConcurrentIosPerDevice   int     `json:"rebalanceIoPriorityNumOfConcurrentIosPerDevice"`
	RebalanceioPriorityBwLimitPerDeviceInKbps        int     `json:"rebalanceIoPriorityBwLimitPerDeviceInKbps"`
	RebuildioPriorityAppIopsPerDeviceThreshold       int     `json:"rebuildIoPriorityAppIopsPerDeviceThreshold"`
	RebalanceioPriorityAppIopsPerDeviceThreshold     int     `json:"rebalanceIoPriorityAppIopsPerDeviceThreshold"`
	RebuildioPriorityAppBwPerDeviceThresholdInKbps   int     `json:"rebuildIoPriorityAppBwPerDeviceThresholdInKbps"`
	RebalanceioPriorityAppBwPerDeviceThresholdInKbps int     `json:"rebalanceIoPriorityAppBwPerDeviceThresholdInKbps"`
	RebuildioPriorityQuietPeriodInMsec               int     `json:"rebuildIoPriorityQuietPeriodInMsec"`
	RebalanceioPriorityQuietPeriodInMsec             int     `json:"rebalanceIoPriorityQuietPeriodInMsec"`
	ZeroPaddingEnabled                               bool    `json:"zeroPaddingEnabled"`
	UseRmcache                                       bool    `json:"useRmcache"`
	SparePercentage                                  int     `json:"sparePercentage"`
	RmCacheWriteHandlingMode                         string  `json:"rmcacheWriteHandlingMode"`
	RebuildEnabled                                   bool    `json:"rebuildEnabled"`
	RebalanceEnabled                                 bool    `json:"rebalanceEnabled"`
	NumofParallelRebuildRebalanceJobsPerDevice       int     `json:"numOfParallelRebuildRebalanceJobsPerDevice"`
	Name                                             string  `json:"name"`
	ID                                               string  `json:"id"`
	Links                                            []*Link `json:"links"`
}

// StoragePoolParam is the request body for creating a storage pool.
type StoragePoolParam struct {
	Name                     string `json:"name"`
	SparePercentage          int    `json:"sparePercentage,omitempty"`
	RebuildEnabled           bool   `json:"rebuildEnabled,omitempty"`
	RebalanceEnabled         bool   `json:"rebalanceEnabled,omitempty"`
	ProtectionDomainID       string `json:"protectionDomainId"`
	ZeroPaddingEnabled       bool   `json:"zeroPaddingEnabled,omitempty"`
	UseRmcache               bool   `json:"useRmcache,omitempty"`
	RmcacheWriteHandlingMode string `json:"rmcacheWriteHandlingMode,omitempty"`
}

// StoragePoolResp is the pool-creation response carrying the new pool ID.
type StoragePoolResp struct {
	ID string `json:"id"`
}

// MappedSdcInfo describes one SDC a volume is mapped to, with its limits.
type MappedSdcInfo struct {
	SdcID         string `json:"sdcId"`
	SdcIP         string `json:"sdcIp"`
	LimitIops     int    `json:"limitIops"`
	LimitBwInMbps int    `json:"limitBwInMbps"`
}

// Volume describes a ScaleIO volume as returned by the REST API.
type Volume struct {
	StoragePoolID           string           `json:"storagePoolId"`
	UseRmCache              bool             `json:"useRmcache"`
	MappingToAllSdcsEnabled bool             `json:"mappingToAllSdcsEnabled"`
	MappedSdcInfo           []*MappedSdcInfo `json:"mappedSdcInfo"`
	IsObfuscated            bool             `json:"isObfuscated"`
	VolumeType              string           `json:"volumeType"`
	ConsistencyGroupID      string           `json:"consistencyGroupId"`
	VTreeID                 string           `json:"vtreeId"`
	AncestorVolumeID        string           `json:"ancestorVolumeId"`
	MappedScsiInitiatorInfo string           `json:"mappedScsiInitiatorInfo"`
	SizeInKb                int              `json:"sizeInKb"`
	CreationTime            int              `json:"creationTime"`
	Name                    string           `json:"name"`
	ID                      string           `json:"id"`
	Links                   []*Link          `json:"links"`
}

// VolumeParam is the request body for creating a volume. Numeric-looking
// values are transmitted as strings per the gateway API.
type VolumeParam struct {
	ProtectionDomainID string `json:"protectionDomainId,omitempty"`
	StoragePoolID      string `json:"storagePoolId,omitempty"`
	UseRmCache         string `json:"useRmcache,omitempty"`
	VolumeType         string `json:"volumeType,omitempty"`
	VolumeSizeInKb     string `json:"volumeSizeInKb,omitempty"`
	Name               string `json:"name,omitempty"`
}

// VolumeResp is the volume-creation response carrying the new volume ID.
type VolumeResp struct {
	ID string `json:"id"`
}

// VolumeQeryIdByKeyParam is the request body for the queryIdByKey action.
// (Name keeps the original "Qery" spelling — callers depend on it.)
type VolumeQeryIdByKeyParam struct {
	Name string `json:"name"`
}

// VolumeQeryBySelectedIdsParam is the request body for querying volumes by a
// set of IDs. (Name keeps the original "Qery" spelling.)
type VolumeQeryBySelectedIdsParam struct {
	IDs []string `json:"ids"`
}

// MapVolumeSdcParam is the request body for mapping a volume to an SDC.
type MapVolumeSdcParam struct {
	SdcID                 string `json:"sdcId,omitempty"`
	AllowMultipleMappings string `json:"allowMultipleMappings,omitempty"`
	AllSdcs               string `json:"allSdcs,omitempty"`
}

// UnmapVolumeSdcParam is the request body for removing an SDC mapping.
type UnmapVolumeSdcParam struct {
	SdcID                string `json:"sdcId,omitempty"`
	IgnoreScsiInitiators string `json:"ignoreScsiInitiators,omitempty"`
	AllSdcs              string `json:"allSdcs,omitempty"`
}

// SnapshotDef names one source volume and the snapshot to create from it.
type SnapshotDef struct {
	VolumeID     string `json:"volumeId,omitempty"`
	SnapshotName string `json:"snapshotName,omitempty"`
}

// SnapshotVolumesParam is the request body for the snapshotVolumes action.
type SnapshotVolumesParam struct {
	SnapshotDefs []*SnapshotDef `json:"snapshotDefs"`
}

// SnapshotVolumesResp is the snapshotVolumes response: the new volume IDs
// and the consistency-group ID.
type SnapshotVolumesResp struct {
	VolumeIDList    []string `json:"volumeIdList"`
	SnapshotGroupID string   `json:"snapshotGroupId"`
}

// VTree describes a volume tree (a base volume plus its snapshots).
type VTree struct {
	ID            string  `json:"id"`
	Name          string  `json:"name"`
	BaseVolumeID  string  `json:"baseVolumeId"`
	StoragePoolID string  `json:"storagePoolId"`
	Links         []*Link `json:"links"`
}

// RemoveVolumeParam is the request body for the removeVolume action.
type RemoveVolumeParam struct {
	RemoveMode string `json:"removeMode"`
}
|
35
vendor/github.com/codedellemc/goscaleio/user.go
generated
vendored
Normal file
35
vendor/github.com/codedellemc/goscaleio/user.go
generated
vendored
Normal file
@@ -0,0 +1,35 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
func (system *System) GetUser() (user []types.User, err error) {
|
||||
endpoint := system.client.SIOEndpoint
|
||||
endpoint.Path = fmt.Sprintf("/api/instances/System::%v/relationships/User", system.System.ID)
|
||||
|
||||
req := system.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth("", system.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+system.client.configConnect.Version)
|
||||
|
||||
resp, err := system.client.retryCheckResp(&system.client.Http, req)
|
||||
if err != nil {
|
||||
return []types.User{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = system.client.decodeBody(resp, &user); err != nil {
|
||||
return []types.User{}, fmt.Errorf("error decoding instances response: %s", err)
|
||||
}
|
||||
|
||||
// bs, err := ioutil.ReadAll(resp.Body)
|
||||
// if err != nil {
|
||||
// return types.User{}, errors.New("error reading body")
|
||||
// }
|
||||
//
|
||||
// fmt.Println(string(bs))
|
||||
// return types.User{}, nil
|
||||
return user, nil
|
||||
}
|
278
vendor/github.com/codedellemc/goscaleio/volume.go
generated
vendored
Normal file
278
vendor/github.com/codedellemc/goscaleio/volume.go
generated
vendored
Normal file
@@ -0,0 +1,278 @@
|
||||
package goscaleio
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
types "github.com/codedellemc/goscaleio/types/v1"
|
||||
)
|
||||
|
||||
// SdcMappedVolume describes one ScaleIO volume as seen by the local SDC
// (ScaleIO Data Client) kernel driver; see GetLocalVolumeMap.
type SdcMappedVolume struct {
	MdmID string // MDM cluster ID reported by drv_cfg for this volume
	VolumeID string // ScaleIO volume ID
	SdcDevice string // resolved local device path from /dev/disk/by-id; empty if not found
	// Mounted bool
	// MountPath bool
	// Mapped bool
}
|
||||
|
||||
// Volume pairs a REST volume object with the API client used to fetch it,
// so instance-scoped actions (GetVTree, RemoveVolume) can issue further
// calls against the same endpoint and session.
type Volume struct {
	Volume *types.Volume // the underlying REST representation
	client *Client // API client bound at construction
}
|
||||
|
||||
func NewVolume(client *Client) *Volume {
|
||||
return &Volume{
|
||||
Volume: new(types.Volume),
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// GetVolume looks up volumes for this storage pool. Exactly one lookup key
// is used, chosen in this order:
//
//   - volumename: resolved to an ID via FindVolumeID first; a "Not found"
//     result yields (nil, nil) rather than an error.
//   - volumeid (possibly just resolved): fetches /api/instances/Volume::<id>.
//   - volumehref: fetched as-is when non-empty.
//   - none of the above: the pool's Volume relationship link is followed and
//     the list is filtered — with getSnapshots true only snapshots (non-empty
//     AncestorVolumeID) are kept; otherwise only volumes whose
//     AncestorVolumeID equals ancestorvolumeid.
func (storagePool *StoragePool) GetVolume(volumehref, volumeid, ancestorvolumeid, volumename string, getSnapshots bool) (volumes []*types.Volume, err error) {

	endpoint := storagePool.client.SIOEndpoint

	if volumename != "" {
		volumeid, err = storagePool.FindVolumeID(volumename)
		// "Not found" is deliberately treated as an empty result, not an error.
		if err != nil && err.Error() == "Not found" {
			return nil, nil
		}
		if err != nil {
			return []*types.Volume{}, fmt.Errorf("Error: problem finding volume: %s", err)
		}
	}

	if volumeid != "" {
		endpoint.Path = fmt.Sprintf("/api/instances/Volume::%s", volumeid)
	} else if volumehref == "" {
		link, err := GetLink(storagePool.StoragePool.Links, "/api/StoragePool/relationship/Volume")
		if err != nil {
			return []*types.Volume{}, errors.New("Error: problem finding link")
		}
		endpoint.Path = link.HREF
	} else {
		endpoint.Path = volumehref
	}

	req := storagePool.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
	// ScaleIO basic auth: empty user name plus the session token.
	req.SetBasicAuth("", storagePool.client.Token)
	req.Header.Add("Accept", "application/json;version="+storagePool.client.configConnect.Version)

	resp, err := storagePool.client.retryCheckResp(&storagePool.client.Http, req)
	if err != nil {
		return []*types.Volume{}, fmt.Errorf("problem getting response: %v", err)
	}
	defer resp.Body.Close()

	if volumehref == "" && volumeid == "" {
		// Relationship link: the body is a JSON array; filter it in place.
		if err = storagePool.client.decodeBody(resp, &volumes); err != nil {
			return []*types.Volume{}, fmt.Errorf("error decoding storage pool response: %s", err)
		}
		var volumesNew []*types.Volume
		for _, volume := range volumes {
			if (!getSnapshots && volume.AncestorVolumeID == ancestorvolumeid) || (getSnapshots && volume.AncestorVolumeID != "") {
				volumesNew = append(volumesNew, volume)
			}
		}
		volumes = volumesNew
	} else {
		// Direct instance fetch: the body is a single volume object.
		volume := &types.Volume{}
		if err = storagePool.client.decodeBody(resp, &volume); err != nil {
			return []*types.Volume{}, fmt.Errorf("error decoding instances response: %s", err)
		}
		volumes = append(volumes, volume)
	}
	return volumes, nil
}
|
||||
|
||||
func (storagePool *StoragePool) FindVolumeID(volumename string) (volumeID string, err error) {
|
||||
|
||||
endpoint := storagePool.client.SIOEndpoint
|
||||
|
||||
volumeQeryIdByKeyParam := &types.VolumeQeryIdByKeyParam{}
|
||||
volumeQeryIdByKeyParam.Name = volumename
|
||||
|
||||
jsonOutput, err := json.Marshal(&volumeQeryIdByKeyParam)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("/api/types/Volume/instances/action/queryIdByKey")
|
||||
|
||||
req := storagePool.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", storagePool.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
|
||||
resp, err := storagePool.client.retryCheckResp(&storagePool.client.Http, req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errors.New("error reading body")
|
||||
}
|
||||
|
||||
volumeID = string(bs)
|
||||
|
||||
volumeID = strings.TrimRight(volumeID, `"`)
|
||||
volumeID = strings.TrimLeft(volumeID, `"`)
|
||||
|
||||
return volumeID, nil
|
||||
}
|
||||
|
||||
func GetLocalVolumeMap() (mappedVolumes []*SdcMappedVolume, err error) {
|
||||
|
||||
// get sdc kernel guid
|
||||
// /bin/emc/scaleio/drv_cfg --query_guid
|
||||
// sdcKernelGuid := "271bad82-08ee-44f2-a2b1-7e2787c27be1"
|
||||
|
||||
mappedVolumesMap := make(map[string]*SdcMappedVolume)
|
||||
|
||||
out, err := exec.Command("/opt/emc/scaleio/sdc/bin/drv_cfg", "--query_vols").Output()
|
||||
if err != nil {
|
||||
return []*SdcMappedVolume{}, fmt.Errorf("Error querying volumes: ", err)
|
||||
}
|
||||
|
||||
result := string(out)
|
||||
lines := strings.Split(result, "\n")
|
||||
|
||||
for _, line := range lines {
|
||||
split := strings.Split(line, " ")
|
||||
if split[0] == "VOL-ID" {
|
||||
mappedVolume := &SdcMappedVolume{MdmID: split[3], VolumeID: split[1]}
|
||||
mdmVolumeID := fmt.Sprintf("%s-%s", mappedVolume.MdmID, mappedVolume.VolumeID)
|
||||
mappedVolumesMap[mdmVolumeID] = mappedVolume
|
||||
}
|
||||
}
|
||||
|
||||
diskIDPath := "/dev/disk/by-id"
|
||||
files, _ := ioutil.ReadDir(diskIDPath)
|
||||
r, _ := regexp.Compile(`^emc-vol-\w*-\w*$`)
|
||||
for _, f := range files {
|
||||
matched := r.MatchString(f.Name())
|
||||
if matched {
|
||||
mdmVolumeID := strings.Replace(f.Name(), "emc-vol-", "", 1)
|
||||
devPath, _ := filepath.EvalSymlinks(fmt.Sprintf("%s/%s", diskIDPath, f.Name()))
|
||||
if _, ok := mappedVolumesMap[mdmVolumeID]; ok {
|
||||
mappedVolumesMap[mdmVolumeID].SdcDevice = devPath
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
keys := make([]string, 0, len(mappedVolumesMap))
|
||||
for key := range mappedVolumesMap {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, key := range keys {
|
||||
mappedVolumes = append(mappedVolumes, mappedVolumesMap[key])
|
||||
}
|
||||
|
||||
return mappedVolumes, nil
|
||||
}
|
||||
|
||||
func (storagePool *StoragePool) CreateVolume(volume *types.VolumeParam) (volumeResp *types.VolumeResp, err error) {
|
||||
|
||||
endpoint := storagePool.client.SIOEndpoint
|
||||
|
||||
endpoint.Path = "/api/types/Volume/instances"
|
||||
|
||||
volume.StoragePoolID = storagePool.StoragePool.ID
|
||||
volume.ProtectionDomainID = storagePool.StoragePool.ProtectionDomainID
|
||||
|
||||
jsonOutput, err := json.Marshal(&volume)
|
||||
if err != nil {
|
||||
return &types.VolumeResp{}, fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
|
||||
req := storagePool.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
req.SetBasicAuth("", storagePool.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+storagePool.client.configConnect.Version)
|
||||
|
||||
resp, err := storagePool.client.retryCheckResp(&storagePool.client.Http, req)
|
||||
if err != nil {
|
||||
return &types.VolumeResp{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = storagePool.client.decodeBody(resp, &volumeResp); err != nil {
|
||||
return &types.VolumeResp{}, fmt.Errorf("error decoding volume creation response: %s", err)
|
||||
}
|
||||
|
||||
return volumeResp, nil
|
||||
}
|
||||
|
||||
func (volume *Volume) GetVTree() (vtree *types.VTree, err error) {
|
||||
|
||||
endpoint := volume.client.SIOEndpoint
|
||||
|
||||
link, err := GetLink(volume.Volume.Links, "/api/parent/relationship/vtreeId")
|
||||
if err != nil {
|
||||
return &types.VTree{}, errors.New("Error: problem finding link")
|
||||
}
|
||||
endpoint.Path = link.HREF
|
||||
|
||||
req := volume.client.NewRequest(map[string]string{}, "GET", endpoint, nil)
|
||||
req.SetBasicAuth("", volume.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+volume.client.configConnect.Version)
|
||||
|
||||
resp, err := volume.client.retryCheckResp(&volume.client.Http, req)
|
||||
if err != nil {
|
||||
return &types.VTree{}, fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if err = volume.client.decodeBody(resp, &vtree); err != nil {
|
||||
return &types.VTree{}, fmt.Errorf("error decoding vtree response: %s", err)
|
||||
}
|
||||
return vtree, nil
|
||||
}
|
||||
|
||||
func (volume *Volume) RemoveVolume(removeMode string) (err error) {
|
||||
|
||||
endpoint := volume.client.SIOEndpoint
|
||||
|
||||
link, err := GetLink(volume.Volume.Links, "self")
|
||||
if err != nil {
|
||||
return errors.New("Error: problem finding link")
|
||||
}
|
||||
endpoint.Path = fmt.Sprintf("%v/action/removeVolume", link.HREF)
|
||||
|
||||
if removeMode == "" {
|
||||
removeMode = "ONLY_ME"
|
||||
}
|
||||
removeVolumeParam := &types.RemoveVolumeParam{
|
||||
RemoveMode: removeMode,
|
||||
}
|
||||
|
||||
jsonOutput, err := json.Marshal(&removeVolumeParam)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error marshaling: %s", err)
|
||||
}
|
||||
|
||||
req := volume.client.NewRequest(map[string]string{}, "POST", endpoint, bytes.NewBufferString(string(jsonOutput)))
|
||||
|
||||
req.SetBasicAuth("", volume.client.Token)
|
||||
req.Header.Add("Accept", "application/json;version="+volume.client.configConnect.Version)
|
||||
req.Header.Add("Content-Type", "application/json;version="+volume.client.configConnect.Version)
|
||||
|
||||
resp, err := volume.client.retryCheckResp(&volume.client.Http, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("problem getting response: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
Reference in New Issue
Block a user