Merge pull request #19739 from vishh/cadvisor-v0.20.2
Updating cAdvisor deps to v0.20.4
This commit is contained in:
commit
da19f81e0a
82
Godeps/Godeps.json
generated
82
Godeps/Godeps.json
generated
@ -392,6 +392,11 @@
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/go-units",
|
||||
"Comment": "v0.1.0-21-g0bbddae",
|
||||
"Rev": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/spdystream",
|
||||
"Rev": "c33989bcb56748d2473194d11f8ac3fc563688eb"
|
||||
@ -574,93 +579,93 @@
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/api",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/cache/memory",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/collector",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/container",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/events",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/fs",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/healthz",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/http",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/info/v1",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/info/v2",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/manager",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/metrics",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/pages",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/storage",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/summary",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/utils",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/validate",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/version",
|
||||
"Comment": "v0.20.1",
|
||||
"Rev": "634965abc45557ed03c268bb193a00cfcbedbd32"
|
||||
"Comment": "v0.20.4",
|
||||
"Rev": "59488ce2c4197f501283739c6a4dd3169999f317"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/gofuzz",
|
||||
@ -758,6 +763,11 @@
|
||||
"ImportPath": "github.com/miekg/dns",
|
||||
"Rev": "3f504e8dabd5d562e997d19ce0200aa41973e1b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mistifyio/go-zfs",
|
||||
"Comment": "v2.1.1-5-g1b4ae6f",
|
||||
"Rev": "1b4ae6fb4e77b095934d4430860ff202060169f8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/mapstructure",
|
||||
"Rev": "740c764bc6149d3f1806231418adb9f52c11bcbf"
|
||||
|
2
Godeps/LICENSES.md
generated
2
Godeps/LICENSES.md
generated
@ -29,6 +29,7 @@ github.com/daviddengcn/go-colortext | BSD?
|
||||
github.com/dgrijalva/jwt-go | spdxMIT
|
||||
github.com/docker/docker | Apache-2
|
||||
github.com/docker/docker/pkg/symlink | spdxBSD3
|
||||
github.com/docker/go-units | Apache-2
|
||||
github.com/docker/spdystream | SeeFile
|
||||
github.com/elazarl/go-bindata-assetfs | spdxBSD2
|
||||
github.com/elazarl/goproxy | BSDWarr
|
||||
@ -60,6 +61,7 @@ github.com/kr/pty | spdxMIT
|
||||
github.com/matttproud/golang_protobuf_extensions | Apache-2
|
||||
github.com/mesos/mesos-go | Apache-2
|
||||
github.com/miekg/dns | spdxBSD3
|
||||
github.com/mistifyio/go-zfs | Apache-2
|
||||
github.com/mitchellh/mapstructure | MITname
|
||||
github.com/mvdan/xurls | spdxBSD3
|
||||
github.com/mxk/go-flowrate | spdxBSD3
|
||||
|
67
Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md
generated
vendored
Normal file
67
Godeps/_workspace/src/github.com/docker/go-units/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,67 @@
|
||||
# Contributing to go-units
|
||||
|
||||
Want to hack on go-units? Awesome! Here are instructions to get you started.
|
||||
|
||||
go-units is a part of the [Docker](https://www.docker.com) project, and follows
|
||||
the same rules and principles. If you're already familiar with the way
|
||||
Docker does things, you'll feel right at home.
|
||||
|
||||
Otherwise, go read Docker's
|
||||
[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md),
|
||||
[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md),
|
||||
[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and
|
||||
[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md).
|
||||
|
||||
### Sign your work
|
||||
|
||||
The sign-off is a simple line at the end of the explanation for the patch. Your
|
||||
signature certifies that you wrote the patch or otherwise have the right to pass
|
||||
it on as an open-source patch. The rules are pretty simple: if you can certify
|
||||
the below (from [developercertificate.org](http://developercertificate.org/)):
|
||||
|
||||
```
|
||||
Developer Certificate of Origin
|
||||
Version 1.1
|
||||
|
||||
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
|
||||
660 York Street, Suite 102,
|
||||
San Francisco, CA 94110 USA
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this
|
||||
license document, but changing it is not allowed.
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
```
|
||||
|
||||
Then you just add a line to every git commit message:
|
||||
|
||||
Signed-off-by: Joe Smith <joe.smith@email.com>
|
||||
|
||||
Use your real name (sorry, no pseudonyms or anonymous contributions.)
|
||||
|
||||
If you set your `user.name` and `user.email` git configs, you can sign your
|
||||
commit automatically with `git commit -s`.
|
191
Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code
generated
vendored
Normal file
191
Godeps/_workspace/src/github.com/docker/go-units/LICENSE.code
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2015 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
425
Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs
generated
vendored
Normal file
425
Godeps/_workspace/src/github.com/docker/go-units/LICENSE.docs
generated
vendored
Normal file
@ -0,0 +1,425 @@
|
||||
Attribution-ShareAlike 4.0 International
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons Corporation ("Creative Commons") is not a law firm and
|
||||
does not provide legal services or legal advice. Distribution of
|
||||
Creative Commons public licenses does not create a lawyer-client or
|
||||
other relationship. Creative Commons makes its licenses and related
|
||||
information available on an "as-is" basis. Creative Commons gives no
|
||||
warranties regarding its licenses, any material licensed under their
|
||||
terms and conditions, or any related information. Creative Commons
|
||||
disclaims all liability for damages resulting from their use to the
|
||||
fullest extent possible.
|
||||
|
||||
Using Creative Commons Public Licenses
|
||||
|
||||
Creative Commons public licenses provide a standard set of terms and
|
||||
conditions that creators and other rights holders may use to share
|
||||
original works of authorship and other material subject to copyright
|
||||
and certain other rights specified in the public license below. The
|
||||
following considerations are for informational purposes only, are not
|
||||
exhaustive, and do not form part of our licenses.
|
||||
|
||||
Considerations for licensors: Our public licenses are
|
||||
intended for use by those authorized to give the public
|
||||
permission to use material in ways otherwise restricted by
|
||||
copyright and certain other rights. Our licenses are
|
||||
irrevocable. Licensors should read and understand the terms
|
||||
and conditions of the license they choose before applying it.
|
||||
Licensors should also secure all rights necessary before
|
||||
applying our licenses so that the public can reuse the
|
||||
material as expected. Licensors should clearly mark any
|
||||
material not subject to the license. This includes other CC-
|
||||
licensed material, or material used under an exception or
|
||||
limitation to copyright. More considerations for licensors:
|
||||
wiki.creativecommons.org/Considerations_for_licensors
|
||||
|
||||
Considerations for the public: By using one of our public
|
||||
licenses, a licensor grants the public permission to use the
|
||||
licensed material under specified terms and conditions. If
|
||||
the licensor's permission is not necessary for any reason--for
|
||||
example, because of any applicable exception or limitation to
|
||||
copyright--then that use is not regulated by the license. Our
|
||||
licenses grant only permissions under copyright and certain
|
||||
other rights that a licensor has authority to grant. Use of
|
||||
the licensed material may still be restricted for other
|
||||
reasons, including because others have copyright or other
|
||||
rights in the material. A licensor may make special requests,
|
||||
such as asking that all changes be marked or described.
|
||||
Although not required by our licenses, you are encouraged to
|
||||
respect those requests where reasonable. More_considerations
|
||||
for the public:
|
||||
wiki.creativecommons.org/Considerations_for_licensees
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons Attribution-ShareAlike 4.0 International Public
|
||||
License
|
||||
|
||||
By exercising the Licensed Rights (defined below), You accept and agree
|
||||
to be bound by the terms and conditions of this Creative Commons
|
||||
Attribution-ShareAlike 4.0 International Public License ("Public
|
||||
License"). To the extent this Public License may be interpreted as a
|
||||
contract, You are granted the Licensed Rights in consideration of Your
|
||||
acceptance of these terms and conditions, and the Licensor grants You
|
||||
such rights in consideration of benefits the Licensor receives from
|
||||
making the Licensed Material available under these terms and
|
||||
conditions.
|
||||
|
||||
|
||||
Section 1 -- Definitions.
|
||||
|
||||
a. Adapted Material means material subject to Copyright and Similar
|
||||
Rights that is derived from or based upon the Licensed Material
|
||||
and in which the Licensed Material is translated, altered,
|
||||
arranged, transformed, or otherwise modified in a manner requiring
|
||||
permission under the Copyright and Similar Rights held by the
|
||||
Licensor. For purposes of this Public License, where the Licensed
|
||||
Material is a musical work, performance, or sound recording,
|
||||
Adapted Material is always produced where the Licensed Material is
|
||||
synched in timed relation with a moving image.
|
||||
|
||||
b. Adapter's License means the license You apply to Your Copyright
|
||||
and Similar Rights in Your contributions to Adapted Material in
|
||||
accordance with the terms and conditions of this Public License.
|
||||
|
||||
c. BY-SA Compatible License means a license listed at
|
||||
creativecommons.org/compatiblelicenses, approved by Creative
|
||||
Commons as essentially the equivalent of this Public License.
|
||||
|
||||
d. Copyright and Similar Rights means copyright and/or similar rights
|
||||
closely related to copyright including, without limitation,
|
||||
performance, broadcast, sound recording, and Sui Generis Database
|
||||
Rights, without regard to how the rights are labeled or
|
||||
categorized. For purposes of this Public License, the rights
|
||||
specified in Section 2(b)(1)-(2) are not Copyright and Similar
|
||||
Rights.
|
||||
|
||||
e. Effective Technological Measures means those measures that, in the
|
||||
absence of proper authority, may not be circumvented under laws
|
||||
fulfilling obligations under Article 11 of the WIPO Copyright
|
||||
Treaty adopted on December 20, 1996, and/or similar international
|
||||
agreements.
|
||||
|
||||
f. Exceptions and Limitations means fair use, fair dealing, and/or
|
||||
any other exception or limitation to Copyright and Similar Rights
|
||||
that applies to Your use of the Licensed Material.
|
||||
|
||||
g. License Elements means the license attributes listed in the name
|
||||
of a Creative Commons Public License. The License Elements of this
|
||||
Public License are Attribution and ShareAlike.
|
||||
|
||||
h. Licensed Material means the artistic or literary work, database,
|
||||
or other material to which the Licensor applied this Public
|
||||
License.
|
||||
|
||||
i. Licensed Rights means the rights granted to You subject to the
|
||||
terms and conditions of this Public License, which are limited to
|
||||
all Copyright and Similar Rights that apply to Your use of the
|
||||
Licensed Material and that the Licensor has authority to license.
|
||||
|
||||
j. Licensor means the individual(s) or entity(ies) granting rights
|
||||
under this Public License.
|
||||
|
||||
k. Share means to provide material to the public by any means or
|
||||
process that requires permission under the Licensed Rights, such
|
||||
as reproduction, public display, public performance, distribution,
|
||||
dissemination, communication, or importation, and to make material
|
||||
available to the public including in ways that members of the
|
||||
public may access the material from a place and at a time
|
||||
individually chosen by them.
|
||||
|
||||
l. Sui Generis Database Rights means rights other than copyright
|
||||
resulting from Directive 96/9/EC of the European Parliament and of
|
||||
the Council of 11 March 1996 on the legal protection of databases,
|
||||
as amended and/or succeeded, as well as other essentially
|
||||
equivalent rights anywhere in the world.
|
||||
|
||||
m. You means the individual or entity exercising the Licensed Rights
|
||||
under this Public License. Your has a corresponding meaning.
|
||||
|
||||
|
||||
Section 2 -- Scope.
|
||||
|
||||
a. License grant.
|
||||
|
||||
1. Subject to the terms and conditions of this Public License,
|
||||
the Licensor hereby grants You a worldwide, royalty-free,
|
||||
non-sublicensable, non-exclusive, irrevocable license to
|
||||
exercise the Licensed Rights in the Licensed Material to:
|
||||
|
||||
a. reproduce and Share the Licensed Material, in whole or
|
||||
in part; and
|
||||
|
||||
b. produce, reproduce, and Share Adapted Material.
|
||||
|
||||
2. Exceptions and Limitations. For the avoidance of doubt, where
|
||||
Exceptions and Limitations apply to Your use, this Public
|
||||
License does not apply, and You do not need to comply with
|
||||
its terms and conditions.
|
||||
|
||||
3. Term. The term of this Public License is specified in Section
|
||||
6(a).
|
||||
|
||||
4. Media and formats; technical modifications allowed. The
|
||||
Licensor authorizes You to exercise the Licensed Rights in
|
||||
all media and formats whether now known or hereafter created,
|
||||
and to make technical modifications necessary to do so. The
|
||||
Licensor waives and/or agrees not to assert any right or
|
||||
authority to forbid You from making technical modifications
|
||||
necessary to exercise the Licensed Rights, including
|
||||
technical modifications necessary to circumvent Effective
|
||||
Technological Measures. For purposes of this Public License,
|
||||
simply making modifications authorized by this Section 2(a)
|
||||
(4) never produces Adapted Material.
|
||||
|
||||
5. Downstream recipients.
|
||||
|
||||
a. Offer from the Licensor -- Licensed Material. Every
|
||||
recipient of the Licensed Material automatically
|
||||
receives an offer from the Licensor to exercise the
|
||||
Licensed Rights under the terms and conditions of this
|
||||
Public License.
|
||||
|
||||
b. Additional offer from the Licensor -- Adapted Material.
|
||||
Every recipient of Adapted Material from You
|
||||
automatically receives an offer from the Licensor to
|
||||
exercise the Licensed Rights in the Adapted Material
|
||||
under the conditions of the Adapter's License You apply.
|
||||
|
||||
c. No downstream restrictions. You may not offer or impose
|
||||
any additional or different terms or conditions on, or
|
||||
apply any Effective Technological Measures to, the
|
||||
Licensed Material if doing so restricts exercise of the
|
||||
Licensed Rights by any recipient of the Licensed
|
||||
Material.
|
||||
|
||||
6. No endorsement. Nothing in this Public License constitutes or
|
||||
may be construed as permission to assert or imply that You
|
||||
are, or that Your use of the Licensed Material is, connected
|
||||
with, or sponsored, endorsed, or granted official status by,
|
||||
the Licensor or others designated to receive attribution as
|
||||
provided in Section 3(a)(1)(A)(i).
|
||||
|
||||
b. Other rights.
|
||||
|
||||
1. Moral rights, such as the right of integrity, are not
|
||||
licensed under this Public License, nor are publicity,
|
||||
privacy, and/or other similar personality rights; however, to
|
||||
the extent possible, the Licensor waives and/or agrees not to
|
||||
assert any such rights held by the Licensor to the limited
|
||||
extent necessary to allow You to exercise the Licensed
|
||||
Rights, but not otherwise.
|
||||
|
||||
2. Patent and trademark rights are not licensed under this
|
||||
Public License.
|
||||
|
||||
3. To the extent possible, the Licensor waives any right to
|
||||
collect royalties from You for the exercise of the Licensed
|
||||
Rights, whether directly or through a collecting society
|
||||
under any voluntary or waivable statutory or compulsory
|
||||
licensing scheme. In all other cases the Licensor expressly
|
||||
reserves any right to collect such royalties.
|
||||
|
||||
|
||||
Section 3 -- License Conditions.
|
||||
|
||||
Your exercise of the Licensed Rights is expressly made subject to the
|
||||
following conditions.
|
||||
|
||||
a. Attribution.
|
||||
|
||||
1. If You Share the Licensed Material (including in modified
|
||||
form), You must:
|
||||
|
||||
a. retain the following if it is supplied by the Licensor
|
||||
with the Licensed Material:
|
||||
|
||||
i. identification of the creator(s) of the Licensed
|
||||
Material and any others designated to receive
|
||||
attribution, in any reasonable manner requested by
|
||||
the Licensor (including by pseudonym if
|
||||
designated);
|
||||
|
||||
ii. a copyright notice;
|
||||
|
||||
iii. a notice that refers to this Public License;
|
||||
|
||||
iv. a notice that refers to the disclaimer of
|
||||
warranties;
|
||||
|
||||
v. a URI or hyperlink to the Licensed Material to the
|
||||
extent reasonably practicable;
|
||||
|
||||
b. indicate if You modified the Licensed Material and
|
||||
retain an indication of any previous modifications; and
|
||||
|
||||
c. indicate the Licensed Material is licensed under this
|
||||
Public License, and include the text of, or the URI or
|
||||
hyperlink to, this Public License.
|
||||
|
||||
2. You may satisfy the conditions in Section 3(a)(1) in any
|
||||
reasonable manner based on the medium, means, and context in
|
||||
which You Share the Licensed Material. For example, it may be
|
||||
reasonable to satisfy the conditions by providing a URI or
|
||||
hyperlink to a resource that includes the required
|
||||
information.
|
||||
|
||||
3. If requested by the Licensor, You must remove any of the
|
||||
information required by Section 3(a)(1)(A) to the extent
|
||||
reasonably practicable.
|
||||
|
||||
b. ShareAlike.
|
||||
|
||||
In addition to the conditions in Section 3(a), if You Share
|
||||
Adapted Material You produce, the following conditions also apply.
|
||||
|
||||
1. The Adapter's License You apply must be a Creative Commons
|
||||
license with the same License Elements, this version or
|
||||
later, or a BY-SA Compatible License.
|
||||
|
||||
2. You must include the text of, or the URI or hyperlink to, the
|
||||
Adapter's License You apply. You may satisfy this condition
|
||||
in any reasonable manner based on the medium, means, and
|
||||
context in which You Share Adapted Material.
|
||||
|
||||
3. You may not offer or impose any additional or different terms
|
||||
or conditions on, or apply any Effective Technological
|
||||
Measures to, Adapted Material that restrict exercise of the
|
||||
rights granted under the Adapter's License You apply.
|
||||
|
||||
|
||||
Section 4 -- Sui Generis Database Rights.
|
||||
|
||||
Where the Licensed Rights include Sui Generis Database Rights that
|
||||
apply to Your use of the Licensed Material:
|
||||
|
||||
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
|
||||
to extract, reuse, reproduce, and Share all or a substantial
|
||||
portion of the contents of the database;
|
||||
|
||||
b. if You include all or a substantial portion of the database
|
||||
contents in a database in which You have Sui Generis Database
|
||||
Rights, then the database in which You have Sui Generis Database
|
||||
Rights (but not its individual contents) is Adapted Material,
|
||||
|
||||
including for purposes of Section 3(b); and
|
||||
c. You must comply with the conditions in Section 3(a) if You Share
|
||||
all or a substantial portion of the contents of the database.
|
||||
|
||||
For the avoidance of doubt, this Section 4 supplements and does not
|
||||
replace Your obligations under this Public License where the Licensed
|
||||
Rights include other Copyright and Similar Rights.
|
||||
|
||||
|
||||
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
|
||||
|
||||
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
|
||||
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
|
||||
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
|
||||
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
|
||||
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
|
||||
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
|
||||
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
|
||||
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
|
||||
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
|
||||
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
|
||||
|
||||
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
|
||||
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
|
||||
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
|
||||
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
|
||||
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
|
||||
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
|
||||
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
|
||||
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
|
||||
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
|
||||
|
||||
c. The disclaimer of warranties and limitation of liability provided
|
||||
above shall be interpreted in a manner that, to the extent
|
||||
possible, most closely approximates an absolute disclaimer and
|
||||
waiver of all liability.
|
||||
|
||||
|
||||
Section 6 -- Term and Termination.
|
||||
|
||||
a. This Public License applies for the term of the Copyright and
|
||||
Similar Rights licensed here. However, if You fail to comply with
|
||||
this Public License, then Your rights under this Public License
|
||||
terminate automatically.
|
||||
|
||||
b. Where Your right to use the Licensed Material has terminated under
|
||||
Section 6(a), it reinstates:
|
||||
|
||||
1. automatically as of the date the violation is cured, provided
|
||||
it is cured within 30 days of Your discovery of the
|
||||
violation; or
|
||||
|
||||
2. upon express reinstatement by the Licensor.
|
||||
|
||||
For the avoidance of doubt, this Section 6(b) does not affect any
|
||||
right the Licensor may have to seek remedies for Your violations
|
||||
of this Public License.
|
||||
|
||||
c. For the avoidance of doubt, the Licensor may also offer the
|
||||
Licensed Material under separate terms or conditions or stop
|
||||
distributing the Licensed Material at any time; however, doing so
|
||||
will not terminate this Public License.
|
||||
|
||||
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
|
||||
License.
|
||||
|
||||
|
||||
Section 7 -- Other Terms and Conditions.
|
||||
|
||||
a. The Licensor shall not be bound by any additional or different
|
||||
terms or conditions communicated by You unless expressly agreed.
|
||||
|
||||
b. Any arrangements, understandings, or agreements regarding the
|
||||
Licensed Material not stated herein are separate from and
|
||||
independent of the terms and conditions of this Public License.
|
||||
|
||||
|
||||
Section 8 -- Interpretation.
|
||||
|
||||
a. For the avoidance of doubt, this Public License does not, and
|
||||
shall not be interpreted to, reduce, limit, restrict, or impose
|
||||
conditions on any use of the Licensed Material that could lawfully
|
||||
be made without permission under this Public License.
|
||||
|
||||
b. To the extent possible, if any provision of this Public License is
|
||||
deemed unenforceable, it shall be automatically reformed to the
|
||||
minimum extent necessary to make it enforceable. If the provision
|
||||
cannot be reformed, it shall be severed from this Public License
|
||||
without affecting the enforceability of the remaining terms and
|
||||
conditions.
|
||||
|
||||
c. No term or condition of this Public License will be waived and no
|
||||
failure to comply consented to unless expressly agreed to by the
|
||||
Licensor.
|
||||
|
||||
d. Nothing in this Public License constitutes or may be interpreted
|
||||
as a limitation upon, or waiver of, any privileges and immunities
|
||||
that apply to the Licensor or You, including from the legal
|
||||
processes of any jurisdiction or authority.
|
||||
|
||||
|
||||
=======================================================================
|
||||
|
||||
Creative Commons is not a party to its public licenses.
|
||||
Notwithstanding, Creative Commons may elect to apply one of its public
|
||||
licenses to material it publishes and in those instances will be
|
||||
considered the "Licensor." Except for the limited purpose of indicating
|
||||
that material is shared under a Creative Commons public license or as
|
||||
otherwise permitted by the Creative Commons policies published at
|
||||
creativecommons.org/policies, Creative Commons does not authorize the
|
||||
use of the trademark "Creative Commons" or any other trademark or logo
|
||||
of Creative Commons without its prior written consent including,
|
||||
without limitation, in connection with any unauthorized modifications
|
||||
to any of its public licenses or any other arrangements,
|
||||
understandings, or agreements concerning use of licensed material. For
|
||||
the avoidance of doubt, this paragraph does not form part of the public
|
||||
licenses.
|
||||
|
||||
Creative Commons may be contacted at creativecommons.org.
|
27
Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS
generated
vendored
Normal file
27
Godeps/_workspace/src/github.com/docker/go-units/MAINTAINERS
generated
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
# go-connections maintainers file
|
||||
#
|
||||
# This file describes who runs the docker/go-connections project and how.
|
||||
# This is a living document - if you see something out of date or missing, speak up!
|
||||
#
|
||||
# It is structured to be consumable by both humans and programs.
|
||||
# To extract its contents programmatically, use any TOML-compliant parser.
|
||||
#
|
||||
# This file is compiled into the MAINTAINERS file in docker/opensource.
|
||||
#
|
||||
[Org]
|
||||
[Org."Core maintainers"]
|
||||
people = [
|
||||
"calavera",
|
||||
]
|
||||
|
||||
[people]
|
||||
|
||||
# A reference list of all people associated with the project.
|
||||
# All other sections should refer to people by their canonical key
|
||||
# in the people section.
|
||||
|
||||
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
|
||||
[people.calavera]
|
||||
Name = "David Calavera"
|
||||
Email = "david.calavera@gmail.com"
|
||||
GitHub = "calavera"
|
18
Godeps/_workspace/src/github.com/docker/go-units/README.md
generated
vendored
Normal file
18
Godeps/_workspace/src/github.com/docker/go-units/README.md
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
[](https://godoc.org/github.com/docker/go-units)
|
||||
|
||||
# Introduction
|
||||
|
||||
go-units is a library to transform human friendly measurements into machine friendly values.
|
||||
|
||||
## Usage
|
||||
|
||||
See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation.
|
||||
|
||||
## Copyright and license
|
||||
|
||||
Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code
|
||||
is released under the Apache 2.0 license. The README.md file, and files in the
|
||||
"docs" folder are licensed under the Creative Commons Attribution 4.0
|
||||
International License under the terms and conditions set forth in the file
|
||||
"LICENSE.docs". You may obtain a duplicate copy of the same license, titled
|
||||
CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
|
11
Godeps/_workspace/src/github.com/docker/go-units/circle.yml
generated
vendored
Normal file
11
Godeps/_workspace/src/github.com/docker/go-units/circle.yml
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
dependencies:
|
||||
post:
|
||||
# install golint
|
||||
- go get github.com/golang/lint/golint
|
||||
|
||||
test:
|
||||
pre:
|
||||
# run analysis before tests
|
||||
- go vet ./...
|
||||
- test -z "$(golint ./... | tee /dev/stderr)"
|
||||
- test -z "$(gofmt -s -l . | tee /dev/stderr)"
|
33
Godeps/_workspace/src/github.com/docker/go-units/duration.go
generated
vendored
Normal file
33
Godeps/_workspace/src/github.com/docker/go-units/duration.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
// Package units provides helper function to parse and print size and time units
|
||||
// in human-readable format.
|
||||
package units
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HumanDuration returns a human-readable approximation of a duration
|
||||
// (eg. "About a minute", "4 hours ago", etc.).
|
||||
func HumanDuration(d time.Duration) string {
|
||||
if seconds := int(d.Seconds()); seconds < 1 {
|
||||
return "Less than a second"
|
||||
} else if seconds < 60 {
|
||||
return fmt.Sprintf("%d seconds", seconds)
|
||||
} else if minutes := int(d.Minutes()); minutes == 1 {
|
||||
return "About a minute"
|
||||
} else if minutes < 60 {
|
||||
return fmt.Sprintf("%d minutes", minutes)
|
||||
} else if hours := int(d.Hours()); hours == 1 {
|
||||
return "About an hour"
|
||||
} else if hours < 48 {
|
||||
return fmt.Sprintf("%d hours", hours)
|
||||
} else if hours < 24*7*2 {
|
||||
return fmt.Sprintf("%d days", hours/24)
|
||||
} else if hours < 24*30*3 {
|
||||
return fmt.Sprintf("%d weeks", hours/24/7)
|
||||
} else if hours < 24*365*2 {
|
||||
return fmt.Sprintf("%d months", hours/24/30)
|
||||
}
|
||||
return fmt.Sprintf("%d years", int(d.Hours())/24/365)
|
||||
}
|
95
Godeps/_workspace/src/github.com/docker/go-units/size.go
generated
vendored
Normal file
95
Godeps/_workspace/src/github.com/docker/go-units/size.go
generated
vendored
Normal file
@ -0,0 +1,95 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// See: http://en.wikipedia.org/wiki/Binary_prefix
|
||||
const (
|
||||
// Decimal
|
||||
|
||||
KB = 1000
|
||||
MB = 1000 * KB
|
||||
GB = 1000 * MB
|
||||
TB = 1000 * GB
|
||||
PB = 1000 * TB
|
||||
|
||||
// Binary
|
||||
|
||||
KiB = 1024
|
||||
MiB = 1024 * KiB
|
||||
GiB = 1024 * MiB
|
||||
TiB = 1024 * GiB
|
||||
PiB = 1024 * TiB
|
||||
)
|
||||
|
||||
type unitMap map[string]int64
|
||||
|
||||
var (
|
||||
decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
|
||||
binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
|
||||
sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
|
||||
)
|
||||
|
||||
var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
|
||||
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
|
||||
|
||||
// CustomSize returns a human-readable approximation of a size
|
||||
// using custom format.
|
||||
func CustomSize(format string, size float64, base float64, _map []string) string {
|
||||
i := 0
|
||||
for size >= base {
|
||||
size = size / base
|
||||
i++
|
||||
}
|
||||
return fmt.Sprintf(format, size, _map[i])
|
||||
}
|
||||
|
||||
// HumanSize returns a human-readable approximation of a size
|
||||
// capped at 4 valid numbers (eg. "2.746 MB", "796 KB").
|
||||
func HumanSize(size float64) string {
|
||||
return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
|
||||
}
|
||||
|
||||
// BytesSize returns a human-readable size in bytes, kibibytes,
|
||||
// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB").
|
||||
func BytesSize(size float64) string {
|
||||
return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
|
||||
}
|
||||
|
||||
// FromHumanSize returns an integer from a human-readable specification of a
|
||||
// size using SI standard (eg. "44kB", "17MB").
|
||||
func FromHumanSize(size string) (int64, error) {
|
||||
return parseSize(size, decimalMap)
|
||||
}
|
||||
|
||||
// RAMInBytes parses a human-readable string representing an amount of RAM
|
||||
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
|
||||
// returns the number of bytes, or -1 if the string is unparseable.
|
||||
// Units are case-insensitive, and the 'b' suffix is optional.
|
||||
func RAMInBytes(size string) (int64, error) {
|
||||
return parseSize(size, binaryMap)
|
||||
}
|
||||
|
||||
// Parses the human-readable size string into the amount it represents.
|
||||
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
|
||||
matches := sizeRegex.FindStringSubmatch(sizeStr)
|
||||
if len(matches) != 3 {
|
||||
return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
|
||||
}
|
||||
|
||||
size, err := strconv.ParseInt(matches[1], 10, 0)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
unitPrefix := strings.ToLower(matches[2])
|
||||
if mul, ok := uMap[unitPrefix]; ok {
|
||||
size *= mul
|
||||
}
|
||||
|
||||
return size, nil
|
||||
}
|
118
Godeps/_workspace/src/github.com/docker/go-units/ulimit.go
generated
vendored
Normal file
118
Godeps/_workspace/src/github.com/docker/go-units/ulimit.go
generated
vendored
Normal file
@ -0,0 +1,118 @@
|
||||
package units
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Ulimit is a human friendly version of Rlimit.
|
||||
type Ulimit struct {
|
||||
Name string
|
||||
Hard int64
|
||||
Soft int64
|
||||
}
|
||||
|
||||
// Rlimit specifies the resource limits, such as max open files.
|
||||
type Rlimit struct {
|
||||
Type int `json:"type,omitempty"`
|
||||
Hard uint64 `json:"hard,omitempty"`
|
||||
Soft uint64 `json:"soft,omitempty"`
|
||||
}
|
||||
|
||||
const (
|
||||
// magic numbers for making the syscall
|
||||
// some of these are defined in the syscall package, but not all.
|
||||
// Also since Windows client doesn't get access to the syscall package, need to
|
||||
// define these here
|
||||
rlimitAs = 9
|
||||
rlimitCore = 4
|
||||
rlimitCPU = 0
|
||||
rlimitData = 2
|
||||
rlimitFsize = 1
|
||||
rlimitLocks = 10
|
||||
rlimitMemlock = 8
|
||||
rlimitMsgqueue = 12
|
||||
rlimitNice = 13
|
||||
rlimitNofile = 7
|
||||
rlimitNproc = 6
|
||||
rlimitRss = 5
|
||||
rlimitRtprio = 14
|
||||
rlimitRttime = 15
|
||||
rlimitSigpending = 11
|
||||
rlimitStack = 3
|
||||
)
|
||||
|
||||
var ulimitNameMapping = map[string]int{
|
||||
//"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
|
||||
"core": rlimitCore,
|
||||
"cpu": rlimitCPU,
|
||||
"data": rlimitData,
|
||||
"fsize": rlimitFsize,
|
||||
"locks": rlimitLocks,
|
||||
"memlock": rlimitMemlock,
|
||||
"msgqueue": rlimitMsgqueue,
|
||||
"nice": rlimitNice,
|
||||
"nofile": rlimitNofile,
|
||||
"nproc": rlimitNproc,
|
||||
"rss": rlimitRss,
|
||||
"rtprio": rlimitRtprio,
|
||||
"rttime": rlimitRttime,
|
||||
"sigpending": rlimitSigpending,
|
||||
"stack": rlimitStack,
|
||||
}
|
||||
|
||||
// ParseUlimit parses and returns a Ulimit from the specified string.
|
||||
func ParseUlimit(val string) (*Ulimit, error) {
|
||||
parts := strings.SplitN(val, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return nil, fmt.Errorf("invalid ulimit argument: %s", val)
|
||||
}
|
||||
|
||||
if _, exists := ulimitNameMapping[parts[0]]; !exists {
|
||||
return nil, fmt.Errorf("invalid ulimit type: %s", parts[0])
|
||||
}
|
||||
|
||||
var (
|
||||
soft int64
|
||||
hard = &soft // default to soft in case no hard was set
|
||||
temp int64
|
||||
err error
|
||||
)
|
||||
switch limitVals := strings.Split(parts[1], ":"); len(limitVals) {
|
||||
case 2:
|
||||
temp, err = strconv.ParseInt(limitVals[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hard = &temp
|
||||
fallthrough
|
||||
case 1:
|
||||
soft, err = strconv.ParseInt(limitVals[0], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1])
|
||||
}
|
||||
|
||||
if soft > *hard {
|
||||
return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard)
|
||||
}
|
||||
|
||||
return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil
|
||||
}
|
||||
|
||||
// GetRlimit returns the RLimit corresponding to Ulimit.
|
||||
func (u *Ulimit) GetRlimit() (*Rlimit, error) {
|
||||
t, exists := ulimitNameMapping[u.Name]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("invalid ulimit name %s", u.Name)
|
||||
}
|
||||
|
||||
return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil
|
||||
}
|
||||
|
||||
func (u *Ulimit) String() string {
|
||||
return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard)
|
||||
}
|
199
Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go
generated
vendored
199
Godeps/_workspace/src/github.com/google/cadvisor/api/versions.go
generated
vendored
@ -19,7 +19,6 @@ import (
|
||||
"net/http"
|
||||
"path"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/info/v2"
|
||||
@ -32,6 +31,7 @@ const (
|
||||
containersApi = "containers"
|
||||
subcontainersApi = "subcontainers"
|
||||
machineApi = "machine"
|
||||
machineStatsApi = "machinestats"
|
||||
dockerApi = "docker"
|
||||
summaryApi = "summary"
|
||||
statsApi = "stats"
|
||||
@ -63,8 +63,9 @@ func getApiVersions() []ApiVersion {
|
||||
v1_2 := newVersion1_2(v1_1)
|
||||
v1_3 := newVersion1_3(v1_2)
|
||||
v2_0 := newVersion2_0()
|
||||
v2_1 := newVersion2_1(v2_0)
|
||||
|
||||
return []ApiVersion{v1_0, v1_1, v1_2, v1_3, v2_0}
|
||||
return []ApiVersion{v1_0, v1_1, v1_2, v1_3, v2_0, v2_1}
|
||||
|
||||
}
|
||||
|
||||
@ -358,47 +359,47 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
|
||||
case statsApi:
|
||||
name := getContainerName(request)
|
||||
glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
|
||||
conts, err := m.GetRequestedContainersInfo(name, opt)
|
||||
infos, err := m.GetRequestedContainersInfo(name, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contStats := make(map[string][]v2.ContainerStats, 0)
|
||||
for name, cont := range conts {
|
||||
contStats[name] = convertStats(cont)
|
||||
contStats := make(map[string][]v2.DeprecatedContainerStats, 0)
|
||||
for name, cinfo := range infos {
|
||||
contStats[name] = v2.DeprecatedStatsFromV1(cinfo)
|
||||
}
|
||||
return writeResult(contStats, w)
|
||||
case customMetricsApi:
|
||||
containerName := getContainerName(request)
|
||||
glog.V(4).Infof("Api - Custom Metrics: Looking for metrics for container %q, options %+v", containerName, opt)
|
||||
conts, err := m.GetRequestedContainersInfo(containerName, opt)
|
||||
infos, err := m.GetContainerInfoV2(containerName, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contMetrics := make(map[string]map[string]map[string][]info.MetricValBasic, 0)
|
||||
for _, cont := range conts {
|
||||
for _, cinfo := range infos {
|
||||
metrics := make(map[string]map[string][]info.MetricValBasic, 0)
|
||||
contStats := convertStats(cont)
|
||||
for _, contStat := range contStats {
|
||||
if contStat.HasCustomMetrics {
|
||||
for name, allLabels := range contStat.CustomMetrics {
|
||||
metricLabels := make(map[string][]info.MetricValBasic, 0)
|
||||
for _, metric := range allLabels {
|
||||
if !metric.Timestamp.IsZero() {
|
||||
metVal := info.MetricValBasic{
|
||||
Timestamp: metric.Timestamp,
|
||||
IntValue: metric.IntValue,
|
||||
FloatValue: metric.FloatValue,
|
||||
}
|
||||
labels := metrics[name]
|
||||
if labels != nil {
|
||||
values := labels[metric.Label]
|
||||
values = append(values, metVal)
|
||||
labels[metric.Label] = values
|
||||
metrics[name] = labels
|
||||
} else {
|
||||
metricLabels[metric.Label] = []info.MetricValBasic{metVal}
|
||||
metrics[name] = metricLabels
|
||||
}
|
||||
for _, contStat := range cinfo.Stats {
|
||||
if len(contStat.CustomMetrics) == 0 {
|
||||
continue
|
||||
}
|
||||
for name, allLabels := range contStat.CustomMetrics {
|
||||
metricLabels := make(map[string][]info.MetricValBasic, 0)
|
||||
for _, metric := range allLabels {
|
||||
if !metric.Timestamp.IsZero() {
|
||||
metVal := info.MetricValBasic{
|
||||
Timestamp: metric.Timestamp,
|
||||
IntValue: metric.IntValue,
|
||||
FloatValue: metric.FloatValue,
|
||||
}
|
||||
labels := metrics[name]
|
||||
if labels != nil {
|
||||
values := labels[metric.Label]
|
||||
values = append(values, metVal)
|
||||
labels[metric.Label] = values
|
||||
metrics[name] = labels
|
||||
} else {
|
||||
metricLabels[metric.Label] = []info.MetricValBasic{metVal}
|
||||
metrics[name] = metricLabels
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -451,102 +452,54 @@ func (self *version2_0) HandleRequest(requestType string, request []string, m ma
|
||||
}
|
||||
}
|
||||
|
||||
func instCpuStats(last, cur *info.ContainerStats) (*v2.CpuInstStats, error) {
|
||||
if last == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !cur.Timestamp.After(last.Timestamp) {
|
||||
return nil, fmt.Errorf("container stats move backwards in time")
|
||||
}
|
||||
if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
|
||||
return nil, fmt.Errorf("different number of cpus")
|
||||
}
|
||||
timeDelta := cur.Timestamp.Sub(last.Timestamp)
|
||||
if timeDelta <= 100*time.Millisecond {
|
||||
return nil, fmt.Errorf("time delta unexpectedly small")
|
||||
}
|
||||
// Nanoseconds to gain precision and avoid having zero seconds if the
|
||||
// difference between the timestamps is just under a second
|
||||
timeDeltaNs := uint64(timeDelta.Nanoseconds())
|
||||
convertToRate := func(lastValue, curValue uint64) (uint64, error) {
|
||||
if curValue < lastValue {
|
||||
return 0, fmt.Errorf("cumulative stats decrease")
|
||||
}
|
||||
valueDelta := curValue - lastValue
|
||||
return (valueDelta * 1e9) / timeDeltaNs, nil
|
||||
}
|
||||
total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
|
||||
for i := range percpu {
|
||||
var err error
|
||||
percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &v2.CpuInstStats{
|
||||
Usage: v2.CpuInstUsage{
|
||||
Total: total,
|
||||
PerCpu: percpu,
|
||||
User: user,
|
||||
System: system,
|
||||
},
|
||||
}, nil
|
||||
type version2_1 struct {
|
||||
baseVersion *version2_0
|
||||
}
|
||||
|
||||
func convertStats(cont *info.ContainerInfo) []v2.ContainerStats {
|
||||
stats := make([]v2.ContainerStats, 0, len(cont.Stats))
|
||||
var last *info.ContainerStats
|
||||
for _, val := range cont.Stats {
|
||||
stat := v2.ContainerStats{
|
||||
Timestamp: val.Timestamp,
|
||||
HasCpu: cont.Spec.HasCpu,
|
||||
HasMemory: cont.Spec.HasMemory,
|
||||
HasNetwork: cont.Spec.HasNetwork,
|
||||
HasFilesystem: cont.Spec.HasFilesystem,
|
||||
HasDiskIo: cont.Spec.HasDiskIo,
|
||||
HasCustomMetrics: cont.Spec.HasCustomMetrics,
|
||||
}
|
||||
if stat.HasCpu {
|
||||
stat.Cpu = val.Cpu
|
||||
cpuInst, err := instCpuStats(last, val)
|
||||
if err != nil {
|
||||
glog.Warningf("Could not get instant cpu stats: %v", err)
|
||||
} else {
|
||||
stat.CpuInst = cpuInst
|
||||
}
|
||||
last = val
|
||||
}
|
||||
if stat.HasMemory {
|
||||
stat.Memory = val.Memory
|
||||
}
|
||||
if stat.HasNetwork {
|
||||
stat.Network.Interfaces = val.Network.Interfaces
|
||||
}
|
||||
if stat.HasFilesystem {
|
||||
stat.Filesystem = val.Filesystem
|
||||
}
|
||||
if stat.HasDiskIo {
|
||||
stat.DiskIo = val.DiskIo
|
||||
}
|
||||
if stat.HasCustomMetrics {
|
||||
stat.CustomMetrics = val.CustomMetrics
|
||||
}
|
||||
// TODO(rjnagal): Handle load stats.
|
||||
stats = append(stats, stat)
|
||||
func newVersion2_1(v *version2_0) *version2_1 {
|
||||
return &version2_1{
|
||||
baseVersion: v,
|
||||
}
|
||||
}
|
||||
|
||||
func (self *version2_1) Version() string {
|
||||
return "v2.1"
|
||||
}
|
||||
|
||||
func (self *version2_1) SupportedRequestTypes() []string {
|
||||
return self.baseVersion.SupportedRequestTypes()
|
||||
}
|
||||
|
||||
func (self *version2_1) HandleRequest(requestType string, request []string, m manager.Manager, w http.ResponseWriter, r *http.Request) error {
|
||||
// Get the query request.
|
||||
opt, err := getRequestOptions(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch requestType {
|
||||
case machineStatsApi:
|
||||
glog.V(4).Infof("Api - MachineStats(%v)", request)
|
||||
cont, err := m.GetRequestedContainersInfo("/", opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return writeResult(v2.MachineStatsFromV1(cont["/"]), w)
|
||||
case statsApi:
|
||||
name := getContainerName(request)
|
||||
glog.V(4).Infof("Api - Stats: Looking for stats for container %q, options %+v", name, opt)
|
||||
conts, err := m.GetRequestedContainersInfo(name, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
contStats := make(map[string][]*v2.ContainerStats, len(conts))
|
||||
for name, cont := range conts {
|
||||
contStats[name] = v2.ContainerStatsFromV1(&cont.Spec, cont.Stats)
|
||||
}
|
||||
return writeResult(contStats, w)
|
||||
default:
|
||||
return self.baseVersion.HandleRequest(requestType, request, m, w, r)
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func getRequestOptions(r *http.Request) (v2.RequestOptions, error) {
|
||||
|
4
Godeps/_workspace/src/github.com/google/cadvisor/container/container.go
generated
vendored
4
Godeps/_workspace/src/github.com/google/cadvisor/container/container.go
generated
vendored
@ -81,4 +81,8 @@ type ContainerHandler interface {
|
||||
|
||||
// Cleanup frees up any resources being held like fds or go routines, etc.
|
||||
Cleanup()
|
||||
|
||||
// Start starts any necessary background goroutines - must be cleaned up in Cleanup().
|
||||
// It is expected that most implementations will be a no-op.
|
||||
Start()
|
||||
}
|
||||
|
7
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go
generated
vendored
7
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/factory.go
generated
vendored
@ -49,6 +49,8 @@ var dockerCgroupRegexp = regexp.MustCompile(`.+-([a-z0-9]{64})\.scope$`)
|
||||
|
||||
var noSystemd = flag.Bool("nosystemd", false, "Explicitly disable systemd support for Docker containers")
|
||||
|
||||
var dockerEnvWhitelist = flag.String("docker_env_metadata_whitelist", "", "a comma-separated list of environment variable keys that needs to be collected for docker containers")
|
||||
|
||||
// TODO(vmarmol): Export run dir too for newer Dockers.
|
||||
// Directory holding Docker container state information.
|
||||
func DockerStateDir() string {
|
||||
@ -92,6 +94,7 @@ const (
|
||||
devicemapperStorageDriver storageDriver = "devicemapper"
|
||||
aufsStorageDriver storageDriver = "aufs"
|
||||
overlayStorageDriver storageDriver = "overlay"
|
||||
zfsStorageDriver storageDriver = "zfs"
|
||||
)
|
||||
|
||||
type dockerFactory struct {
|
||||
@ -117,6 +120,9 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
metadataEnvs := strings.Split(*dockerEnvWhitelist, ",")
|
||||
|
||||
handler, err = newDockerContainerHandler(
|
||||
client,
|
||||
name,
|
||||
@ -125,6 +131,7 @@ func (self *dockerFactory) NewContainerHandler(name string, inHostNamespace bool
|
||||
self.storageDriver,
|
||||
&self.cgroupSubsystems,
|
||||
inHostNamespace,
|
||||
metadataEnvs,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
58
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go
generated
vendored
58
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/fsHandler.go
generated
vendored
@ -26,17 +26,19 @@ import (
|
||||
|
||||
type fsHandler interface {
|
||||
start()
|
||||
usage() uint64
|
||||
usage() (uint64, uint64)
|
||||
stop()
|
||||
}
|
||||
|
||||
type realFsHandler struct {
|
||||
sync.RWMutex
|
||||
lastUpdate time.Time
|
||||
usageBytes uint64
|
||||
period time.Duration
|
||||
storageDirs []string
|
||||
fsInfo fs.FsInfo
|
||||
lastUpdate time.Time
|
||||
usageBytes uint64
|
||||
baseUsageBytes uint64
|
||||
period time.Duration
|
||||
rootfs string
|
||||
extraDir string
|
||||
fsInfo fs.FsInfo
|
||||
// Tells the container to stop.
|
||||
stopChan chan struct{}
|
||||
}
|
||||
@ -45,14 +47,16 @@ const longDu = time.Second
|
||||
|
||||
var _ fsHandler = &realFsHandler{}
|
||||
|
||||
func newFsHandler(period time.Duration, storageDirs []string, fsInfo fs.FsInfo) fsHandler {
|
||||
func newFsHandler(period time.Duration, rootfs, extraDir string, fsInfo fs.FsInfo) fsHandler {
|
||||
return &realFsHandler{
|
||||
lastUpdate: time.Time{},
|
||||
usageBytes: 0,
|
||||
period: period,
|
||||
storageDirs: storageDirs,
|
||||
fsInfo: fsInfo,
|
||||
stopChan: make(chan struct{}, 1),
|
||||
lastUpdate: time.Time{},
|
||||
usageBytes: 0,
|
||||
baseUsageBytes: 0,
|
||||
period: period,
|
||||
rootfs: rootfs,
|
||||
extraDir: extraDir,
|
||||
fsInfo: fsInfo,
|
||||
stopChan: make(chan struct{}, 1),
|
||||
}
|
||||
}
|
||||
|
||||
@ -61,23 +65,27 @@ func (fh *realFsHandler) needsUpdate() bool {
|
||||
}
|
||||
|
||||
func (fh *realFsHandler) update() error {
|
||||
var usage uint64
|
||||
for _, dir := range fh.storageDirs {
|
||||
// TODO(Vishh): Add support for external mounts.
|
||||
dirUsage, err := fh.fsInfo.GetDirUsage(dir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
usage += dirUsage
|
||||
// TODO(vishh): Add support for external mounts.
|
||||
baseUsage, err := fh.fsInfo.GetDirUsage(fh.rootfs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
extraDirUsage, err := fh.fsInfo.GetDirUsage(fh.extraDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fh.Lock()
|
||||
defer fh.Unlock()
|
||||
fh.lastUpdate = time.Now()
|
||||
fh.usageBytes = usage
|
||||
fh.usageBytes = baseUsage + extraDirUsage
|
||||
fh.baseUsageBytes = baseUsage
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fh *realFsHandler) trackUsage() {
|
||||
fh.update()
|
||||
for {
|
||||
select {
|
||||
case <-fh.stopChan:
|
||||
@ -89,7 +97,7 @@ func (fh *realFsHandler) trackUsage() {
|
||||
}
|
||||
duration := time.Since(start)
|
||||
if duration > longDu {
|
||||
glog.V(3).Infof("`du` on following dirs took %v: %v", duration, fh.storageDirs)
|
||||
glog.V(3).Infof("`du` on following dirs took %v: %v", duration, []string{fh.rootfs, fh.extraDir})
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -103,8 +111,8 @@ func (fh *realFsHandler) stop() {
|
||||
close(fh.stopChan)
|
||||
}
|
||||
|
||||
func (fh *realFsHandler) usage() uint64 {
|
||||
func (fh *realFsHandler) usage() (baseUsageBytes, totalUsageBytes uint64) {
|
||||
fh.RLock()
|
||||
defer fh.RUnlock()
|
||||
return fh.usageBytes
|
||||
return fh.baseUsageBytes, fh.usageBytes
|
||||
}
|
||||
|
52
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go
generated
vendored
52
Godeps/_workspace/src/github.com/google/cadvisor/container/docker/handler.go
generated
vendored
@ -59,15 +59,16 @@ type dockerContainerHandler struct {
|
||||
// Manager of this container's cgroups.
|
||||
cgroupManager cgroups.Manager
|
||||
|
||||
storageDriver storageDriver
|
||||
fsInfo fs.FsInfo
|
||||
storageDirs []string
|
||||
storageDriver storageDriver
|
||||
fsInfo fs.FsInfo
|
||||
rootfsStorageDir string
|
||||
|
||||
// Time at which this container was created.
|
||||
creationTime time.Time
|
||||
|
||||
// Metadata labels associated with the container.
|
||||
// Metadata associated with the container.
|
||||
labels map[string]string
|
||||
envs map[string]string
|
||||
|
||||
// The container PID used to switch namespaces as required
|
||||
pid int
|
||||
@ -93,6 +94,7 @@ func newDockerContainerHandler(
|
||||
storageDriver storageDriver,
|
||||
cgroupSubsystems *containerlibcontainer.CgroupSubsystems,
|
||||
inHostNamespace bool,
|
||||
metadataEnvs []string,
|
||||
) (container.ContainerHandler, error) {
|
||||
// Create the cgroup paths.
|
||||
cgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))
|
||||
@ -116,14 +118,13 @@ func newDockerContainerHandler(
|
||||
id := ContainerNameToDockerId(name)
|
||||
|
||||
// Add the Containers dir where the log files are stored.
|
||||
storageDirs := []string{path.Join(*dockerRootDir, pathToContainersDir, id)}
|
||||
|
||||
otherStorageDir := path.Join(*dockerRootDir, pathToContainersDir, id)
|
||||
var rootfsStorageDir string
|
||||
switch storageDriver {
|
||||
case aufsStorageDriver:
|
||||
// Add writable layer for aufs.
|
||||
storageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))
|
||||
rootfsStorageDir = path.Join(*dockerRootDir, pathToAufsDir, id)
|
||||
case overlayStorageDriver:
|
||||
storageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToOverlayDir, id))
|
||||
rootfsStorageDir = path.Join(*dockerRootDir, pathToOverlayDir, id)
|
||||
}
|
||||
|
||||
handler := &dockerContainerHandler{
|
||||
@ -136,13 +137,10 @@ func newDockerContainerHandler(
|
||||
storageDriver: storageDriver,
|
||||
fsInfo: fsInfo,
|
||||
rootFs: rootFs,
|
||||
storageDirs: storageDirs,
|
||||
fsHandler: newFsHandler(time.Minute, storageDirs, fsInfo),
|
||||
rootfsStorageDir: rootfsStorageDir,
|
||||
fsHandler: newFsHandler(time.Minute, rootfsStorageDir, otherStorageDir, fsInfo),
|
||||
}
|
||||
|
||||
// Start the filesystem handler.
|
||||
handler.fsHandler.start()
|
||||
|
||||
// We assume that if Inspect fails then the container is not known to docker.
|
||||
ctnr, err := client.InspectContainer(id)
|
||||
if err != nil {
|
||||
@ -157,9 +155,24 @@ func newDockerContainerHandler(
|
||||
handler.image = ctnr.Config.Image
|
||||
handler.networkMode = ctnr.HostConfig.NetworkMode
|
||||
|
||||
// split env vars to get metadata map.
|
||||
for _, exposedEnv := range metadataEnvs {
|
||||
for _, envVar := range ctnr.Config.Env {
|
||||
splits := strings.SplitN(envVar, "=", 2)
|
||||
if splits[0] == exposedEnv {
|
||||
handler.envs[strings.ToLower(exposedEnv)] = splits[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return handler, nil
|
||||
}
|
||||
|
||||
func (self *dockerContainerHandler) Start() {
|
||||
// Start the filesystem handler.
|
||||
self.fsHandler.start()
|
||||
}
|
||||
|
||||
func (self *dockerContainerHandler) Cleanup() {
|
||||
self.fsHandler.stop()
|
||||
}
|
||||
@ -239,13 +252,14 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
spec.CreationTime = self.creationTime
|
||||
|
||||
switch self.storageDriver {
|
||||
case aufsStorageDriver, overlayStorageDriver:
|
||||
case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
|
||||
spec.HasFilesystem = true
|
||||
default:
|
||||
spec.HasFilesystem = false
|
||||
}
|
||||
|
||||
spec.Labels = self.labels
|
||||
spec.Envs = self.envs
|
||||
spec.Image = self.image
|
||||
spec.HasNetwork = hasNet(self.networkMode)
|
||||
|
||||
@ -254,14 +268,12 @@ func (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
|
||||
func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {
|
||||
switch self.storageDriver {
|
||||
case aufsStorageDriver, overlayStorageDriver:
|
||||
case aufsStorageDriver, overlayStorageDriver, zfsStorageDriver:
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
// As of now we assume that all the storage dirs are on the same device.
|
||||
// The first storage dir will be that of the image layers.
|
||||
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])
|
||||
deviceInfo, err := self.fsInfo.GetDirFsDevice(self.rootfsStorageDir)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -281,7 +293,7 @@ func (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error
|
||||
|
||||
fsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}
|
||||
|
||||
fsStat.Usage = self.fsHandler.usage()
|
||||
fsStat.BaseUsage, fsStat.Usage = self.fsHandler.usage()
|
||||
stats.Filesystem = append(stats.Filesystem, fsStat)
|
||||
|
||||
return nil
|
||||
|
2
Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
2
Godeps/_workspace/src/github.com/google/cadvisor/container/libcontainer/helpers.go
generated
vendored
@ -378,6 +378,8 @@ func toContainerStats1(s *cgroups.Stats, ret *info.ContainerStats) {
|
||||
func toContainerStats2(s *cgroups.Stats, ret *info.ContainerStats) {
|
||||
ret.Memory.Usage = s.MemoryStats.Usage.Usage
|
||||
ret.Memory.Failcnt = s.MemoryStats.Usage.Failcnt
|
||||
ret.Memory.Cache = s.MemoryStats.Stats["cache"]
|
||||
ret.Memory.RSS = s.MemoryStats.Stats["rss"]
|
||||
if v, ok := s.MemoryStats.Stats["pgfault"]; ok {
|
||||
ret.Memory.ContainerData.Pgfault = v
|
||||
ret.Memory.HierarchicalData.Pgfault = v
|
||||
|
2
Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go
generated
vendored
2
Godeps/_workspace/src/github.com/google/cadvisor/container/mock.go
generated
vendored
@ -51,6 +51,8 @@ func (self *MockContainerHandler) ContainerReference() (info.ContainerReference,
|
||||
return args.Get(0).(info.ContainerReference), args.Error(1)
|
||||
}
|
||||
|
||||
func (self *MockContainerHandler) Start() {}
|
||||
|
||||
func (self *MockContainerHandler) Cleanup() {}
|
||||
|
||||
func (self *MockContainerHandler) GetSpec() (info.ContainerSpec, error) {
|
||||
|
3
Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go
generated
vendored
3
Godeps/_workspace/src/github.com/google/cadvisor/container/raw/handler.go
generated
vendored
@ -166,6 +166,9 @@ func (self *rawContainerHandler) GetRootNetworkDevices() ([]info.NetInfo, error)
|
||||
return nd, nil
|
||||
}
|
||||
|
||||
// Nothing to start up.
|
||||
func (self *rawContainerHandler) Start() {}
|
||||
|
||||
// Nothing to clean up.
|
||||
func (self *rawContainerHandler) Cleanup() {}
|
||||
|
||||
|
23
Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go
generated
vendored
23
Godeps/_workspace/src/github.com/google/cadvisor/fs/fs.go
generated
vendored
@ -32,6 +32,7 @@ import (
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/golang/glog"
|
||||
zfs "github.com/mistifyio/go-zfs"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -73,8 +74,10 @@ func NewFsInfo(context Context) (FsInfo, error) {
|
||||
// all ext systems are checked through prefix.
|
||||
"btrfs": true,
|
||||
"xfs": true,
|
||||
"zfs": true,
|
||||
}
|
||||
for _, mount := range mounts {
|
||||
var Fstype string
|
||||
if !strings.HasPrefix(mount.Fstype, "ext") && !supportedFsType[mount.Fstype] {
|
||||
continue
|
||||
}
|
||||
@ -82,7 +85,11 @@ func NewFsInfo(context Context) (FsInfo, error) {
|
||||
if _, ok := partitions[mount.Source]; ok {
|
||||
continue
|
||||
}
|
||||
if mount.Fstype == "zfs" {
|
||||
Fstype = mount.Fstype
|
||||
}
|
||||
partitions[mount.Source] = partition{
|
||||
fsType: Fstype,
|
||||
mountpoint: mount.Mountpoint,
|
||||
major: uint(mount.Major),
|
||||
minor: uint(mount.Minor),
|
||||
@ -128,7 +135,7 @@ func getDockerImagePaths(context Context) []string {
|
||||
// TODO(rjnagal): Detect docker root and graphdriver directories from docker info.
|
||||
dockerRoot := context.DockerRoot
|
||||
dockerImagePaths := []string{}
|
||||
for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay"} {
|
||||
for _, dir := range []string{"devicemapper", "btrfs", "aufs", "overlay", "zfs"} {
|
||||
dockerImagePaths = append(dockerImagePaths, path.Join(dockerRoot, dir))
|
||||
}
|
||||
for dockerRoot != "/" && dockerRoot != "." {
|
||||
@ -201,6 +208,8 @@ func (self *RealFsInfo) GetFsInfoForPath(mountSet map[string]struct{}) ([]Fs, er
|
||||
switch partition.fsType {
|
||||
case "devicemapper":
|
||||
total, free, avail, err = getDMStats(device, partition.blockSize)
|
||||
case "zfs":
|
||||
total, free, avail, err = getZfstats(device)
|
||||
default:
|
||||
total, free, avail, err = getVfsStats(partition.mountpoint)
|
||||
}
|
||||
@ -423,3 +432,15 @@ func parseDMStatus(dmStatus string) (uint64, uint64, error) {
|
||||
|
||||
return used, total, nil
|
||||
}
|
||||
|
||||
// getZfstats returns ZFS mount stats using zfsutils
|
||||
func getZfstats(poolName string) (uint64, uint64, uint64, error) {
|
||||
dataset, err := zfs.GetDataset(poolName)
|
||||
if err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
|
||||
total := dataset.Used + dataset.Avail + dataset.Usedbydataset
|
||||
|
||||
return total, dataset.Avail, dataset.Avail, nil
|
||||
}
|
||||
|
15
Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go
generated
vendored
15
Godeps/_workspace/src/github.com/google/cadvisor/info/v1/container.go
generated
vendored
@ -45,6 +45,8 @@ type ContainerSpec struct {
|
||||
|
||||
// Metadata labels associated with this container.
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
// Metadata envs associated with this container. Only whitelisted envs are added.
|
||||
Envs map[string]string `json:"envs,omitempty"`
|
||||
|
||||
HasCpu bool `json:"has_cpu"`
|
||||
Cpu CpuSpec `json:"cpu,omitempty"`
|
||||
@ -306,6 +308,15 @@ type MemoryStats struct {
|
||||
// Units: Bytes.
|
||||
Usage uint64 `json:"usage"`
|
||||
|
||||
// Number of bytes of page cache memory.
|
||||
// Units: Bytes.
|
||||
Cache uint64 `json:"cache"`
|
||||
|
||||
// The amount of anonymous and swap cache memory (includes transparent
|
||||
// hugepages).
|
||||
// Units: Bytes.
|
||||
RSS uint64 `json:"rss"`
|
||||
|
||||
// The amount of working set memory, this includes recently accessed memory,
|
||||
// dirty memory, and kernel memory. Working set is <= "usage".
|
||||
// Units: Bytes.
|
||||
@ -387,6 +398,10 @@ type FsStats struct {
|
||||
// Number of bytes that is consumed by the container on this filesystem.
|
||||
Usage uint64 `json:"usage"`
|
||||
|
||||
// Base Usage that is consumed by the container's writable layer.
|
||||
// This field is only applicable for docker container's as of now.
|
||||
BaseUsage uint64 `json:"base_usage"`
|
||||
|
||||
// Number of bytes available for non-root user.
|
||||
Available uint64 `json:"available"`
|
||||
|
||||
|
2
Godeps/_workspace/src/github.com/google/cadvisor/info/v1/test/datagen.go
generated
vendored
2
Godeps/_workspace/src/github.com/google/cadvisor/info/v1/test/datagen.go
generated
vendored
@ -44,6 +44,8 @@ func GenerateRandomStats(numStats, numCores int, duration time.Duration) []*info
|
||||
stats.Cpu.Usage.User = stats.Cpu.Usage.Total
|
||||
stats.Cpu.Usage.System = 0
|
||||
stats.Memory.Usage = uint64(rand.Int63n(4096))
|
||||
stats.Memory.Cache = uint64(rand.Int63n(4096))
|
||||
stats.Memory.RSS = uint64(rand.Int63n(4096))
|
||||
ret[i] = stats
|
||||
}
|
||||
return ret
|
||||
|
42
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go
generated
vendored
42
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/container.go
generated
vendored
@ -52,6 +52,14 @@ type MemorySpec struct {
|
||||
SwapLimit uint64 `json:"swap_limit,omitempty"`
|
||||
}
|
||||
|
||||
type ContainerInfo struct {
|
||||
// Describes the container.
|
||||
Spec ContainerSpec `json:"spec,omitempty"`
|
||||
|
||||
// Historical statistics gathered from the container.
|
||||
Stats []*ContainerStats `json:"stats,omitempty"`
|
||||
}
|
||||
|
||||
type ContainerSpec struct {
|
||||
// Time at which the container was created.
|
||||
CreationTime time.Time `json:"creation_time,omitempty"`
|
||||
@ -66,6 +74,8 @@ type ContainerSpec struct {
|
||||
|
||||
// Metadata labels associated with this container.
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
// Metadata envs associated with this container. Only whitelisted envs are added.
|
||||
Envs map[string]string `json:"envs,omitempty"`
|
||||
|
||||
HasCpu bool `json:"has_cpu"`
|
||||
Cpu CpuSpec `json:"cpu,omitempty"`
|
||||
@ -85,7 +95,7 @@ type ContainerSpec struct {
|
||||
Image string `json:"image,omitempty"`
|
||||
}
|
||||
|
||||
type ContainerStats struct {
|
||||
type DeprecatedContainerStats struct {
|
||||
// The time of this stat point.
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
// CPU statistics
|
||||
@ -114,6 +124,28 @@ type ContainerStats struct {
|
||||
CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"`
|
||||
}
|
||||
|
||||
type ContainerStats struct {
|
||||
// The time of this stat point.
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
// CPU statistics
|
||||
// In nanoseconds (aggregated)
|
||||
Cpu *v1.CpuStats `json:"cpu,omitempty"`
|
||||
// In nanocores per second (instantaneous)
|
||||
CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
|
||||
// Disk IO statistics
|
||||
DiskIo *v1.DiskIoStats `json:"diskio,omitempty"`
|
||||
// Memory statistics
|
||||
Memory *v1.MemoryStats `json:"memory,omitempty"`
|
||||
// Network statistics
|
||||
Network *NetworkStats `json:"network,omitempty"`
|
||||
// Filesystem statistics
|
||||
Filesystem *FilesystemStats `json:"filesystem,omitempty"`
|
||||
// Task load statistics
|
||||
Load *v1.LoadStats `json:"load_stats,omitempty"`
|
||||
// Custom Metrics
|
||||
CustomMetrics map[string][]v1.MetricVal `json:"custom_metrics,omitempty"`
|
||||
}
|
||||
|
||||
type Percentiles struct {
|
||||
// Indicates whether the stats are present or not.
|
||||
// If true, values below do not have any data.
|
||||
@ -252,3 +284,11 @@ type CpuInstUsage struct {
|
||||
// Unit: nanocores per second
|
||||
System uint64 `json:"system"`
|
||||
}
|
||||
|
||||
// Filesystem usage statistics.
|
||||
type FilesystemStats struct {
|
||||
// Total Number of bytes consumed by container.
|
||||
TotalUsageBytes *uint64 `json:"totalUsageBytes,omitempty"`
|
||||
// Number of bytes consumed by a container through its root filesystem.
|
||||
BaseUsageBytes *uint64 `json:"baseUsageBytes,omitempty"`
|
||||
}
|
||||
|
322
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
Normal file
322
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/conversion.go
generated
vendored
Normal file
@ -0,0 +1,322 @@
|
||||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
func machineFsStatsFromV1(fsStats []v1.FsStats) []MachineFsStats {
|
||||
var result []MachineFsStats
|
||||
for _, stat := range fsStats {
|
||||
readDuration := time.Millisecond * time.Duration(stat.ReadTime)
|
||||
writeDuration := time.Millisecond * time.Duration(stat.WriteTime)
|
||||
ioDuration := time.Millisecond * time.Duration(stat.IoTime)
|
||||
weightedDuration := time.Millisecond * time.Duration(stat.WeightedIoTime)
|
||||
result = append(result, MachineFsStats{
|
||||
Device: stat.Device,
|
||||
Capacity: &stat.Limit,
|
||||
Usage: &stat.Usage,
|
||||
Available: &stat.Available,
|
||||
DiskStats: DiskStats{
|
||||
ReadsCompleted: &stat.ReadsCompleted,
|
||||
ReadsMerged: &stat.ReadsMerged,
|
||||
SectorsRead: &stat.SectorsRead,
|
||||
ReadDuration: &readDuration,
|
||||
WritesCompleted: &stat.WritesCompleted,
|
||||
WritesMerged: &stat.WritesMerged,
|
||||
SectorsWritten: &stat.SectorsWritten,
|
||||
WriteDuration: &writeDuration,
|
||||
IoInProgress: &stat.IoInProgress,
|
||||
IoDuration: &ioDuration,
|
||||
WeightedIoDuration: &weightedDuration,
|
||||
},
|
||||
})
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func MachineStatsFromV1(cont *v1.ContainerInfo) []MachineStats {
|
||||
var stats []MachineStats
|
||||
var last *v1.ContainerStats
|
||||
for _, val := range cont.Stats {
|
||||
stat := MachineStats{
|
||||
Timestamp: val.Timestamp,
|
||||
}
|
||||
if cont.Spec.HasCpu {
|
||||
stat.Cpu = &val.Cpu
|
||||
cpuInst, err := InstCpuStats(last, val)
|
||||
if err != nil {
|
||||
glog.Warningf("Could not get instant cpu stats: %v", err)
|
||||
} else {
|
||||
stat.CpuInst = cpuInst
|
||||
}
|
||||
last = val
|
||||
}
|
||||
if cont.Spec.HasMemory {
|
||||
stat.Memory = &val.Memory
|
||||
}
|
||||
if cont.Spec.HasNetwork {
|
||||
stat.Network = &NetworkStats{
|
||||
// FIXME: Use reflection instead.
|
||||
Tcp: TcpStat(val.Network.Tcp),
|
||||
Tcp6: TcpStat(val.Network.Tcp6),
|
||||
Interfaces: val.Network.Interfaces,
|
||||
}
|
||||
}
|
||||
if cont.Spec.HasFilesystem {
|
||||
stat.Filesystem = machineFsStatsFromV1(val.Filesystem)
|
||||
}
|
||||
// TODO(rjnagal): Handle load stats.
|
||||
stats = append(stats, stat)
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func ContainerStatsFromV1(spec *v1.ContainerSpec, stats []*v1.ContainerStats) []*ContainerStats {
|
||||
newStats := make([]*ContainerStats, 0, len(stats))
|
||||
var last *v1.ContainerStats
|
||||
for _, val := range stats {
|
||||
stat := &ContainerStats{
|
||||
Timestamp: val.Timestamp,
|
||||
}
|
||||
if spec.HasCpu {
|
||||
stat.Cpu = &val.Cpu
|
||||
cpuInst, err := InstCpuStats(last, val)
|
||||
if err != nil {
|
||||
glog.Warningf("Could not get instant cpu stats: %v", err)
|
||||
} else {
|
||||
stat.CpuInst = cpuInst
|
||||
}
|
||||
last = val
|
||||
}
|
||||
if spec.HasMemory {
|
||||
stat.Memory = &val.Memory
|
||||
}
|
||||
if spec.HasNetwork {
|
||||
// TODO: Handle TcpStats
|
||||
stat.Network = &NetworkStats{
|
||||
Interfaces: val.Network.Interfaces,
|
||||
}
|
||||
}
|
||||
if spec.HasFilesystem {
|
||||
if len(val.Filesystem) == 1 {
|
||||
stat.Filesystem = &FilesystemStats{
|
||||
TotalUsageBytes: &val.Filesystem[0].Usage,
|
||||
BaseUsageBytes: &val.Filesystem[0].BaseUsage,
|
||||
}
|
||||
} else if len(val.Filesystem) > 1 {
|
||||
// Cannot handle multiple devices per container.
|
||||
glog.Errorf("failed to handle multiple devices for container. Skipping Filesystem stats")
|
||||
}
|
||||
}
|
||||
if spec.HasDiskIo {
|
||||
stat.DiskIo = &val.DiskIo
|
||||
}
|
||||
if spec.HasCustomMetrics {
|
||||
stat.CustomMetrics = val.CustomMetrics
|
||||
}
|
||||
// TODO(rjnagal): Handle load stats.
|
||||
newStats = append(newStats, stat)
|
||||
}
|
||||
return newStats
|
||||
}
|
||||
|
||||
func DeprecatedStatsFromV1(cont *v1.ContainerInfo) []DeprecatedContainerStats {
|
||||
stats := make([]DeprecatedContainerStats, 0, len(cont.Stats))
|
||||
var last *v1.ContainerStats
|
||||
for _, val := range cont.Stats {
|
||||
stat := DeprecatedContainerStats{
|
||||
Timestamp: val.Timestamp,
|
||||
HasCpu: cont.Spec.HasCpu,
|
||||
HasMemory: cont.Spec.HasMemory,
|
||||
HasNetwork: cont.Spec.HasNetwork,
|
||||
HasFilesystem: cont.Spec.HasFilesystem,
|
||||
HasDiskIo: cont.Spec.HasDiskIo,
|
||||
HasCustomMetrics: cont.Spec.HasCustomMetrics,
|
||||
}
|
||||
if stat.HasCpu {
|
||||
stat.Cpu = val.Cpu
|
||||
cpuInst, err := InstCpuStats(last, val)
|
||||
if err != nil {
|
||||
glog.Warningf("Could not get instant cpu stats: %v", err)
|
||||
} else {
|
||||
stat.CpuInst = cpuInst
|
||||
}
|
||||
last = val
|
||||
}
|
||||
if stat.HasMemory {
|
||||
stat.Memory = val.Memory
|
||||
}
|
||||
if stat.HasNetwork {
|
||||
stat.Network.Interfaces = val.Network.Interfaces
|
||||
}
|
||||
if stat.HasFilesystem {
|
||||
stat.Filesystem = val.Filesystem
|
||||
}
|
||||
if stat.HasDiskIo {
|
||||
stat.DiskIo = val.DiskIo
|
||||
}
|
||||
if stat.HasCustomMetrics {
|
||||
stat.CustomMetrics = val.CustomMetrics
|
||||
}
|
||||
// TODO(rjnagal): Handle load stats.
|
||||
stats = append(stats, stat)
|
||||
}
|
||||
return stats
|
||||
}
|
||||
|
||||
func InstCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) {
|
||||
if last == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !cur.Timestamp.After(last.Timestamp) {
|
||||
return nil, fmt.Errorf("container stats move backwards in time")
|
||||
}
|
||||
if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
|
||||
return nil, fmt.Errorf("different number of cpus")
|
||||
}
|
||||
timeDelta := cur.Timestamp.Sub(last.Timestamp)
|
||||
if timeDelta <= 100*time.Millisecond {
|
||||
return nil, fmt.Errorf("time delta unexpectedly small")
|
||||
}
|
||||
// Nanoseconds to gain precision and avoid having zero seconds if the
|
||||
// difference between the timestamps is just under a second
|
||||
timeDeltaNs := uint64(timeDelta.Nanoseconds())
|
||||
convertToRate := func(lastValue, curValue uint64) (uint64, error) {
|
||||
if curValue < lastValue {
|
||||
return 0, fmt.Errorf("cumulative stats decrease")
|
||||
}
|
||||
valueDelta := curValue - lastValue
|
||||
return (valueDelta * 1e9) / timeDeltaNs, nil
|
||||
}
|
||||
total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
|
||||
for i := range percpu {
|
||||
var err error
|
||||
percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &CpuInstStats{
|
||||
Usage: CpuInstUsage{
|
||||
Total: total,
|
||||
PerCpu: percpu,
|
||||
User: user,
|
||||
System: system,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Get V2 container spec from v1 container info.
|
||||
func ContainerSpecFromV1(specV1 *v1.ContainerSpec, aliases []string, namespace string) ContainerSpec {
|
||||
specV2 := ContainerSpec{
|
||||
CreationTime: specV1.CreationTime,
|
||||
HasCpu: specV1.HasCpu,
|
||||
HasMemory: specV1.HasMemory,
|
||||
HasFilesystem: specV1.HasFilesystem,
|
||||
HasNetwork: specV1.HasNetwork,
|
||||
HasDiskIo: specV1.HasDiskIo,
|
||||
HasCustomMetrics: specV1.HasCustomMetrics,
|
||||
Image: specV1.Image,
|
||||
Labels: specV1.Labels,
|
||||
}
|
||||
if specV1.HasCpu {
|
||||
specV2.Cpu.Limit = specV1.Cpu.Limit
|
||||
specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit
|
||||
specV2.Cpu.Mask = specV1.Cpu.Mask
|
||||
}
|
||||
if specV1.HasMemory {
|
||||
specV2.Memory.Limit = specV1.Memory.Limit
|
||||
specV2.Memory.Reservation = specV1.Memory.Reservation
|
||||
specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
|
||||
}
|
||||
if specV1.HasCustomMetrics {
|
||||
specV2.CustomMetrics = specV1.CustomMetrics
|
||||
}
|
||||
specV2.Aliases = aliases
|
||||
specV2.Namespace = namespace
|
||||
return specV2
|
||||
}
|
||||
|
||||
func instCpuStats(last, cur *v1.ContainerStats) (*CpuInstStats, error) {
|
||||
if last == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if !cur.Timestamp.After(last.Timestamp) {
|
||||
return nil, fmt.Errorf("container stats move backwards in time")
|
||||
}
|
||||
if len(last.Cpu.Usage.PerCpu) != len(cur.Cpu.Usage.PerCpu) {
|
||||
return nil, fmt.Errorf("different number of cpus")
|
||||
}
|
||||
timeDelta := cur.Timestamp.Sub(last.Timestamp)
|
||||
if timeDelta <= 100*time.Millisecond {
|
||||
return nil, fmt.Errorf("time delta unexpectedly small")
|
||||
}
|
||||
// Nanoseconds to gain precision and avoid having zero seconds if the
|
||||
// difference between the timestamps is just under a second
|
||||
timeDeltaNs := uint64(timeDelta.Nanoseconds())
|
||||
convertToRate := func(lastValue, curValue uint64) (uint64, error) {
|
||||
if curValue < lastValue {
|
||||
return 0, fmt.Errorf("cumulative stats decrease")
|
||||
}
|
||||
valueDelta := curValue - lastValue
|
||||
return (valueDelta * 1e9) / timeDeltaNs, nil
|
||||
}
|
||||
total, err := convertToRate(last.Cpu.Usage.Total, cur.Cpu.Usage.Total)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
percpu := make([]uint64, len(last.Cpu.Usage.PerCpu))
|
||||
for i := range percpu {
|
||||
var err error
|
||||
percpu[i], err = convertToRate(last.Cpu.Usage.PerCpu[i], cur.Cpu.Usage.PerCpu[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
user, err := convertToRate(last.Cpu.Usage.User, cur.Cpu.Usage.User)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
system, err := convertToRate(last.Cpu.Usage.System, cur.Cpu.Usage.System)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &CpuInstStats{
|
||||
Usage: CpuInstUsage{
|
||||
Total: total,
|
||||
PerCpu: percpu,
|
||||
User: user,
|
||||
System: system,
|
||||
},
|
||||
}, nil
|
||||
}
|
96
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go
generated
vendored
96
Godeps/_workspace/src/github.com/google/cadvisor/info/v2/machine.go
generated
vendored
@ -16,6 +16,8 @@ package v2
|
||||
|
||||
import (
|
||||
// TODO(rjnagal): Move structs from v1.
|
||||
"time"
|
||||
|
||||
"github.com/google/cadvisor/info/v1"
|
||||
)
|
||||
|
||||
@ -86,3 +88,97 @@ func GetAttributes(mi *v1.MachineInfo, vi *v1.VersionInfo) Attributes {
|
||||
InstanceType: mi.InstanceType,
|
||||
}
|
||||
}
|
||||
|
||||
// MachineStats contains usage statistics for the entire machine.
|
||||
type MachineStats struct {
|
||||
// The time of this stat point.
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
// In nanoseconds (aggregated)
|
||||
Cpu *v1.CpuStats `json:"cpu,omitempty"`
|
||||
// In nanocores per second (instantaneous)
|
||||
CpuInst *CpuInstStats `json:"cpu_inst,omitempty"`
|
||||
// Memory statistics
|
||||
Memory *v1.MemoryStats `json:"memory,omitempty"`
|
||||
// Network statistics
|
||||
Network *NetworkStats `json:"network,omitempty"`
|
||||
// Filesystem statistics
|
||||
Filesystem []MachineFsStats `json:"filesystem,omitempty"`
|
||||
// Task load statistics
|
||||
Load *v1.LoadStats `json:"load_stats,omitempty"`
|
||||
}
|
||||
|
||||
// MachineFsStats contains per filesystem capacity and usage information.
|
||||
type MachineFsStats struct {
|
||||
// The block device name associated with the filesystem.
|
||||
Device string `json:"device"`
|
||||
|
||||
// Number of bytes that can be consumed on this filesystem.
|
||||
Capacity *uint64 `json:"capacity,omitempty"`
|
||||
|
||||
// Number of bytes that is currently consumed on this filesystem.
|
||||
Usage *uint64 `json:"usage,omitempty"`
|
||||
|
||||
// Number of bytes available for non-root user on this filesystem.
|
||||
Available *uint64 `json:"available,omitempty"`
|
||||
|
||||
// DiskStats for this device.
|
||||
DiskStats `json:"inline"`
|
||||
}
|
||||
|
||||
// DiskStats contains per partition usage information.
|
||||
// This information is only available at the machine level.
|
||||
type DiskStats struct {
|
||||
// Number of reads completed
|
||||
// This is the total number of reads completed successfully.
|
||||
ReadsCompleted *uint64 `json:"reads_completed,omitempty"`
|
||||
|
||||
// Number of reads merged
|
||||
// Reads and writes which are adjacent to each other may be merged for
|
||||
// efficiency. Thus two 4K reads may become one 8K read before it is
|
||||
// ultimately handed to the disk, and so it will be counted (and queued)
|
||||
// as only one I/O. This field lets you know how often this was done.
|
||||
ReadsMerged *uint64 `json:"reads_merged,omitempty"`
|
||||
|
||||
// Number of sectors read
|
||||
// This is the total number of sectors read successfully.
|
||||
SectorsRead *uint64 `json:"sectors_read,omitempty"`
|
||||
|
||||
// Time spent reading
|
||||
// This is the total number of milliseconds spent by all reads (as
|
||||
// measured from __make_request() to end_that_request_last()).
|
||||
ReadDuration *time.Duration `json:"read_duration,omitempty"`
|
||||
|
||||
// Number of writes completed
|
||||
// This is the total number of writes completed successfully.
|
||||
WritesCompleted *uint64 `json:"writes_completed,omitempty"`
|
||||
|
||||
// Number of writes merged
|
||||
// See the description of reads merged.
|
||||
WritesMerged *uint64 `json:"writes_merged,omitempty"`
|
||||
|
||||
// Number of sectors written
|
||||
// This is the total number of sectors written successfully.
|
||||
SectorsWritten *uint64 `json:"sectors_written,omitempty"`
|
||||
|
||||
// Time spent writing
|
||||
// This is the total number of milliseconds spent by all writes (as
|
||||
// measured from __make_request() to end_that_request_last()).
|
||||
WriteDuration *time.Duration `json:"write_duration,omitempty"`
|
||||
|
||||
// Number of I/Os currently in progress
|
||||
// The only field that should go to zero. Incremented as requests are
|
||||
// given to appropriate struct request_queue and decremented as they finish.
|
||||
IoInProgress *uint64 `json:"io_in_progress,omitempty"`
|
||||
|
||||
// Time spent doing I/Os
|
||||
// This field increases so long as field 9 is nonzero.
|
||||
IoDuration *time.Duration `json:"io_duration,omitempty"`
|
||||
|
||||
// weighted time spent doing I/Os
|
||||
// This field is incremented at each I/O start, I/O completion, I/O
|
||||
// merge, or read of these stats by the number of I/Os in progress
|
||||
// (field 9) times the number of milliseconds spent doing I/O since the
|
||||
// last update of this field. This can provide an easy measure of both
|
||||
// I/O completion time and the backlog that may be accumulating.
|
||||
WeightedIoDuration *time.Duration `json:"weighted_io_duration,omitempty"`
|
||||
}
|
||||
|
21
Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go
generated
vendored
21
Godeps/_workspace/src/github.com/google/cadvisor/manager/container.go
generated
vendored
@ -19,6 +19,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os/exec"
|
||||
"path"
|
||||
"regexp"
|
||||
@ -36,14 +37,14 @@ import (
|
||||
"github.com/google/cadvisor/summary"
|
||||
"github.com/google/cadvisor/utils/cpuload"
|
||||
|
||||
"github.com/docker/docker/pkg/units"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Housekeeping interval.
|
||||
var HousekeepingInterval = flag.Duration("housekeeping_interval", 1*time.Second, "Interval between container housekeepings")
|
||||
|
||||
var cgroupPathRegExp = regexp.MustCompile(`.*devices.*:(.*?)[,;$].*`)
|
||||
var cgroupPathRegExp = regexp.MustCompile(`devices[^:]*:(.*?)[,;$]`)
|
||||
|
||||
type containerInfo struct {
|
||||
info.ContainerReference
|
||||
@ -78,6 +79,17 @@ type containerData struct {
|
||||
collectorManager collector.CollectorManager
|
||||
}
|
||||
|
||||
// jitter returns a time.Duration between duration and duration + maxFactor * duration,
|
||||
// to allow clients to avoid converging on periodic behavior. If maxFactor is 0.0, a
|
||||
// suggested default value will be chosen.
|
||||
func jitter(duration time.Duration, maxFactor float64) time.Duration {
|
||||
if maxFactor <= 0.0 {
|
||||
maxFactor = 1.0
|
||||
}
|
||||
wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
|
||||
return wait
|
||||
}
|
||||
|
||||
func (c *containerData) Start() error {
|
||||
go c.housekeeping()
|
||||
return nil
|
||||
@ -356,11 +368,14 @@ func (self *containerData) nextHousekeeping(lastHousekeeping time.Time) time.Tim
|
||||
}
|
||||
}
|
||||
|
||||
return lastHousekeeping.Add(self.housekeepingInterval)
|
||||
return lastHousekeeping.Add(jitter(self.housekeepingInterval, 1.0))
|
||||
}
|
||||
|
||||
// TODO(vmarmol): Implement stats collecting as a custom collector.
|
||||
func (c *containerData) housekeeping() {
|
||||
// Start any background goroutines - must be cleaned up in c.handler.Cleanup().
|
||||
c.handler.Start()
|
||||
|
||||
// Long housekeeping is either 100ms or half of the housekeeping interval.
|
||||
longHousekeeping := 100 * time.Millisecond
|
||||
if *HousekeepingInterval/2 < longHousekeeping {
|
||||
|
113
Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go
generated
vendored
113
Godeps/_workspace/src/github.com/google/cadvisor/manager/manager.go
generated
vendored
@ -62,6 +62,9 @@ type Manager interface {
|
||||
// Get information about a container.
|
||||
GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error)
|
||||
|
||||
// Get V2 information about a container.
|
||||
GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error)
|
||||
|
||||
// Get information about all subcontainers of the specified container (includes self).
|
||||
SubcontainersInfo(containerName string, query *info.ContainerInfoRequest) ([]*info.ContainerInfo, error)
|
||||
|
||||
@ -375,33 +378,8 @@ func (self *manager) GetContainerSpec(containerName string, options v2.RequestOp
|
||||
|
||||
// Get V2 container spec from v1 container info.
|
||||
func (self *manager) getV2Spec(cinfo *containerInfo) v2.ContainerSpec {
|
||||
specV1 := self.getAdjustedSpec(cinfo)
|
||||
specV2 := v2.ContainerSpec{
|
||||
CreationTime: specV1.CreationTime,
|
||||
HasCpu: specV1.HasCpu,
|
||||
HasMemory: specV1.HasMemory,
|
||||
HasFilesystem: specV1.HasFilesystem,
|
||||
HasNetwork: specV1.HasNetwork,
|
||||
HasDiskIo: specV1.HasDiskIo,
|
||||
HasCustomMetrics: specV1.HasCustomMetrics,
|
||||
Image: specV1.Image,
|
||||
}
|
||||
if specV1.HasCpu {
|
||||
specV2.Cpu.Limit = specV1.Cpu.Limit
|
||||
specV2.Cpu.MaxLimit = specV1.Cpu.MaxLimit
|
||||
specV2.Cpu.Mask = specV1.Cpu.Mask
|
||||
}
|
||||
if specV1.HasMemory {
|
||||
specV2.Memory.Limit = specV1.Memory.Limit
|
||||
specV2.Memory.Reservation = specV1.Memory.Reservation
|
||||
specV2.Memory.SwapLimit = specV1.Memory.SwapLimit
|
||||
}
|
||||
if specV1.HasCustomMetrics {
|
||||
specV2.CustomMetrics = specV1.CustomMetrics
|
||||
}
|
||||
specV2.Aliases = cinfo.Aliases
|
||||
specV2.Namespace = cinfo.Namespace
|
||||
return specV2
|
||||
spec := self.getAdjustedSpec(cinfo)
|
||||
return v2.ContainerSpecFromV1(&spec, cinfo.Aliases, cinfo.Namespace)
|
||||
}
|
||||
|
||||
func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
|
||||
@ -417,7 +395,6 @@ func (self *manager) getAdjustedSpec(cinfo *containerInfo) info.ContainerSpec {
|
||||
return spec
|
||||
}
|
||||
|
||||
// Get a container by name.
|
||||
func (self *manager) GetContainerInfo(containerName string, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
|
||||
cont, err := self.getContainerData(containerName)
|
||||
if err != nil {
|
||||
@ -426,6 +403,34 @@ func (self *manager) GetContainerInfo(containerName string, query *info.Containe
|
||||
return self.containerDataToContainerInfo(cont, query)
|
||||
}
|
||||
|
||||
func (self *manager) GetContainerInfoV2(containerName string, options v2.RequestOptions) (map[string]v2.ContainerInfo, error) {
|
||||
containers, err := self.getRequestedContainers(containerName, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
infos := make(map[string]v2.ContainerInfo, len(containers))
|
||||
for name, container := range containers {
|
||||
cinfo, err := container.GetInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var nilTime time.Time // Ignored.
|
||||
stats, err := self.memoryCache.RecentStats(name, nilTime, nilTime, options.Count)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
infos[name] = v2.ContainerInfo{
|
||||
Spec: self.getV2Spec(cinfo),
|
||||
Stats: v2.ContainerStatsFromV1(&cinfo.Spec, stats),
|
||||
}
|
||||
}
|
||||
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (self *manager) containerDataToContainerInfo(cont *containerData, query *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
|
||||
// Get the info from the container.
|
||||
cinfo, err := cont.GetInfo()
|
||||
@ -741,6 +746,18 @@ func (m *manager) registerCollectors(collectorConfigs map[string]string, cont *c
|
||||
|
||||
// Create a container.
|
||||
func (m *manager) createContainer(containerName string) error {
|
||||
m.containersLock.Lock()
|
||||
defer m.containersLock.Unlock()
|
||||
|
||||
namespacedName := namespacedContainerName{
|
||||
Name: containerName,
|
||||
}
|
||||
|
||||
// Check that the container didn't already exist.
|
||||
if _, ok := m.containers[namespacedName]; ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
handler, accept, err := container.NewContainerHandler(containerName, m.inHostNamespace)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -770,35 +787,15 @@ func (m *manager) createContainer(containerName string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add to the containers map.
|
||||
alreadyExists := func() bool {
|
||||
m.containersLock.Lock()
|
||||
defer m.containersLock.Unlock()
|
||||
|
||||
namespacedName := namespacedContainerName{
|
||||
Name: containerName,
|
||||
}
|
||||
|
||||
// Check that the container didn't already exist.
|
||||
_, ok := m.containers[namespacedName]
|
||||
if ok {
|
||||
return true
|
||||
}
|
||||
|
||||
// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
|
||||
m.containers[namespacedName] = cont
|
||||
for _, alias := range cont.info.Aliases {
|
||||
m.containers[namespacedContainerName{
|
||||
Namespace: cont.info.Namespace,
|
||||
Name: alias,
|
||||
}] = cont
|
||||
}
|
||||
|
||||
return false
|
||||
}()
|
||||
if alreadyExists {
|
||||
return nil
|
||||
// Add the container name and all its aliases. The aliases must be within the namespace of the factory.
|
||||
m.containers[namespacedName] = cont
|
||||
for _, alias := range cont.info.Aliases {
|
||||
m.containers[namespacedContainerName{
|
||||
Namespace: cont.info.Namespace,
|
||||
Name: alias,
|
||||
}] = cont
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Added container: %q (aliases: %v, namespace: %q)", containerName, cont.info.Aliases, cont.info.Namespace)
|
||||
|
||||
contSpec, err := cont.handler.GetSpec()
|
||||
@ -822,9 +819,7 @@ func (m *manager) createContainer(containerName string) error {
|
||||
}
|
||||
|
||||
// Start the container's housekeeping.
|
||||
cont.Start()
|
||||
|
||||
return nil
|
||||
return cont.Start()
|
||||
}
|
||||
|
||||
func (m *manager) destroyContainer(containerName string) error {
|
||||
|
34
Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go
generated
vendored
34
Godeps/_workspace/src/github.com/google/cadvisor/metrics/prometheus.go
generated
vendored
@ -16,6 +16,7 @@ package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
@ -126,6 +127,20 @@ func NewPrometheusCollector(infoProvider infoProvider, f ContainerNameToLabelsFu
|
||||
}
|
||||
return values
|
||||
},
|
||||
}, {
|
||||
name: "container_memory_cache",
|
||||
help: "Number of bytes of page cache memory.",
|
||||
valueType: prometheus.GaugeValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(s.Memory.Cache)}}
|
||||
},
|
||||
}, {
|
||||
name: "container_memory_rss",
|
||||
help: "Size of RSS in bytes.",
|
||||
valueType: prometheus.GaugeValue,
|
||||
getValues: func(s *info.ContainerStats) metricValues {
|
||||
return metricValues{{value: float64(s.Memory.RSS)}}
|
||||
},
|
||||
}, {
|
||||
name: "container_memory_failcnt",
|
||||
help: "Number of memory usage hits limits",
|
||||
@ -508,11 +523,20 @@ func (c *PrometheusCollector) collectContainersInfo(ch chan<- prometheus.Metric)
|
||||
if c.containerNameToLabels != nil {
|
||||
newLabels := c.containerNameToLabels(name)
|
||||
for k, v := range newLabels {
|
||||
baseLabels = append(baseLabels, k)
|
||||
baseLabels = append(baseLabels, sanitizeLabelName(k))
|
||||
baseLabelValues = append(baseLabelValues, v)
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range container.Spec.Labels {
|
||||
baseLabels = append(baseLabels, sanitizeLabelName(k))
|
||||
baseLabelValues = append(baseLabelValues, v)
|
||||
}
|
||||
for k, v := range container.Spec.Envs {
|
||||
baseLabels = append(baseLabels, sanitizeLabelName(k))
|
||||
baseLabelValues = append(baseLabelValues, v)
|
||||
}
|
||||
|
||||
// Container spec
|
||||
desc := prometheus.NewDesc("container_start_time_seconds", "Start time of the container since unix epoch in seconds.", baseLabels, nil)
|
||||
ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(container.Spec.CreationTime.Unix()), baseLabelValues...)
|
||||
@ -571,3 +595,11 @@ func specMemoryValue(v uint64) float64 {
|
||||
}
|
||||
return float64(v)
|
||||
}
|
||||
|
||||
var invalidLabelCharRE = regexp.MustCompile(`[^a-zA-Z0-9_]`)
|
||||
|
||||
// sanitizeLabelName replaces anything that doesn't match
|
||||
// client_label.LabelNameRE with an underscore.
|
||||
func sanitizeLabelName(name string) string {
|
||||
return invalidLabelCharRE.ReplaceAllString(name, "_")
|
||||
}
|
||||
|
@ -103,7 +103,8 @@ func (self *elasticStorage) AddStats(ref info.ContainerReference, stats *info.Co
|
||||
Do()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(fmt.Errorf("failed to write stats to ElasticSearch- %s", err))
|
||||
fmt.Printf("failed to write stats to ElasticSearch - %s", err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
@ -135,14 +136,15 @@ func newStorage(
|
||||
)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
return nil, fmt.Errorf("failed to create the elasticsearch client - %s", err)
|
||||
}
|
||||
|
||||
// Ping the Elasticsearch server to get e.g. the version number
|
||||
info, code, err := client.Ping().URL(elasticHost).Do()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
panic(err)
|
||||
return nil, fmt.Errorf("failed to ping the elasticsearch - %s", err)
|
||||
|
||||
}
|
||||
fmt.Printf("Elasticsearch returned with code %d and version %s", code, info.Version.Number)
|
||||
|
||||
|
343
Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go
generated
vendored
343
Godeps/_workspace/src/github.com/google/cadvisor/storage/influxdb/influxdb.go
generated
vendored
@ -16,12 +16,14 @@ package influxdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
info "github.com/google/cadvisor/info/v1"
|
||||
"github.com/google/cadvisor/storage"
|
||||
"github.com/google/cadvisor/version"
|
||||
|
||||
influxdb "github.com/influxdb/influxdb/client"
|
||||
)
|
||||
@ -31,39 +33,44 @@ func init() {
|
||||
}
|
||||
|
||||
type influxdbStorage struct {
|
||||
client *influxdb.Client
|
||||
machineName string
|
||||
tableName string
|
||||
bufferDuration time.Duration
|
||||
lastWrite time.Time
|
||||
series []*influxdb.Series
|
||||
lock sync.Mutex
|
||||
readyToFlush func() bool
|
||||
client *influxdb.Client
|
||||
machineName string
|
||||
database string
|
||||
retentionPolicy string
|
||||
bufferDuration time.Duration
|
||||
lastWrite time.Time
|
||||
points []*influxdb.Point
|
||||
lock sync.Mutex
|
||||
readyToFlush func() bool
|
||||
}
|
||||
|
||||
// Series names
|
||||
const (
|
||||
colTimestamp string = "time"
|
||||
colMachineName string = "machine"
|
||||
colContainerName string = "container_name"
|
||||
colCpuCumulativeUsage string = "cpu_cumulative_usage"
|
||||
// Cumulative CPU usage
|
||||
serCpuUsageTotal string = "cpu_usage_total"
|
||||
serCpuUsageSystem string = "cpu_usage_system"
|
||||
serCpuUsageUser string = "cpu_usage_user"
|
||||
serCpuUsagePerCpu string = "cpu_usage_per_cpu"
|
||||
// Smoothed average of number of runnable threads x 1000.
|
||||
serLoadAverage string = "load_average"
|
||||
// Memory Usage
|
||||
colMemoryUsage string = "memory_usage"
|
||||
serMemoryUsage string = "memory_usage"
|
||||
// Working set size
|
||||
colMemoryWorkingSet string = "memory_working_set"
|
||||
serMemoryWorkingSet string = "memory_working_set"
|
||||
// Cumulative count of bytes received.
|
||||
colRxBytes string = "rx_bytes"
|
||||
serRxBytes string = "rx_bytes"
|
||||
// Cumulative count of receive errors encountered.
|
||||
colRxErrors string = "rx_errors"
|
||||
serRxErrors string = "rx_errors"
|
||||
// Cumulative count of bytes transmitted.
|
||||
colTxBytes string = "tx_bytes"
|
||||
serTxBytes string = "tx_bytes"
|
||||
// Cumulative count of transmit errors encountered.
|
||||
colTxErrors string = "tx_errors"
|
||||
serTxErrors string = "tx_errors"
|
||||
// Filesystem device.
|
||||
colFsDevice = "fs_device"
|
||||
serFsDevice string = "fs_device"
|
||||
// Filesystem limit.
|
||||
colFsLimit = "fs_limit"
|
||||
serFsLimit string = "fs_limit"
|
||||
// Filesystem usage.
|
||||
colFsUsage = "fs_usage"
|
||||
serFsUsage string = "fs_usage"
|
||||
)
|
||||
|
||||
func new() (storage.StorageDriver, error) {
|
||||
@ -83,84 +90,122 @@ func new() (storage.StorageDriver, error) {
|
||||
)
|
||||
}
|
||||
|
||||
func (self *influxdbStorage) getSeriesDefaultValues(
|
||||
// Field names
|
||||
const (
|
||||
fieldValue string = "value"
|
||||
fieldType string = "type"
|
||||
fieldDevice string = "device"
|
||||
)
|
||||
|
||||
// Tag names
|
||||
const (
|
||||
tagMachineName string = "machine"
|
||||
tagContainerName string = "container_name"
|
||||
)
|
||||
|
||||
func (self *influxdbStorage) containerFilesystemStatsToPoints(
|
||||
ref info.ContainerReference,
|
||||
stats *info.ContainerStats,
|
||||
columns *[]string,
|
||||
values *[]interface{}) {
|
||||
// Timestamp
|
||||
*columns = append(*columns, colTimestamp)
|
||||
*values = append(*values, stats.Timestamp.UnixNano()/1E3)
|
||||
|
||||
// Machine name
|
||||
*columns = append(*columns, colMachineName)
|
||||
*values = append(*values, self.machineName)
|
||||
|
||||
// Container name
|
||||
*columns = append(*columns, colContainerName)
|
||||
if len(ref.Aliases) > 0 {
|
||||
*values = append(*values, ref.Aliases[0])
|
||||
} else {
|
||||
*values = append(*values, ref.Name)
|
||||
}
|
||||
}
|
||||
|
||||
// In order to maintain a fixed column format, we add a new series for each filesystem partition.
|
||||
func (self *influxdbStorage) containerFilesystemStatsToSeries(
|
||||
ref info.ContainerReference,
|
||||
stats *info.ContainerStats) (series []*influxdb.Series) {
|
||||
stats *info.ContainerStats) (points []*influxdb.Point) {
|
||||
if len(stats.Filesystem) == 0 {
|
||||
return series
|
||||
return points
|
||||
}
|
||||
for _, fsStat := range stats.Filesystem {
|
||||
columns := make([]string, 0)
|
||||
values := make([]interface{}, 0)
|
||||
self.getSeriesDefaultValues(ref, stats, &columns, &values)
|
||||
tagsFsUsage := map[string]string{
|
||||
fieldDevice: fsStat.Device,
|
||||
fieldType: "usage",
|
||||
}
|
||||
fieldsFsUsage := map[string]interface{}{
|
||||
fieldValue: int64(fsStat.Usage),
|
||||
}
|
||||
pointFsUsage := &influxdb.Point{
|
||||
Measurement: serFsUsage,
|
||||
Tags: tagsFsUsage,
|
||||
Fields: fieldsFsUsage,
|
||||
}
|
||||
|
||||
columns = append(columns, colFsDevice)
|
||||
values = append(values, fsStat.Device)
|
||||
tagsFsLimit := map[string]string{
|
||||
fieldDevice: fsStat.Device,
|
||||
fieldType: "limit",
|
||||
}
|
||||
fieldsFsLimit := map[string]interface{}{
|
||||
fieldValue: int64(fsStat.Limit),
|
||||
}
|
||||
pointFsLimit := &influxdb.Point{
|
||||
Measurement: serFsLimit,
|
||||
Tags: tagsFsLimit,
|
||||
Fields: fieldsFsLimit,
|
||||
}
|
||||
|
||||
columns = append(columns, colFsLimit)
|
||||
values = append(values, fsStat.Limit)
|
||||
|
||||
columns = append(columns, colFsUsage)
|
||||
values = append(values, fsStat.Usage)
|
||||
series = append(series, self.newSeries(columns, values))
|
||||
points = append(points, pointFsUsage, pointFsLimit)
|
||||
}
|
||||
return series
|
||||
|
||||
self.tagPoints(ref, stats, points)
|
||||
|
||||
return points
|
||||
}
|
||||
|
||||
func (self *influxdbStorage) containerStatsToValues(
|
||||
// Set tags and timestamp for all points of the batch.
|
||||
// Points should inherit the tags that are set for BatchPoints, but that does not seem to work.
|
||||
func (self *influxdbStorage) tagPoints(ref info.ContainerReference, stats *info.ContainerStats, points []*influxdb.Point) {
|
||||
// Use container alias if possible
|
||||
var containerName string
|
||||
if len(ref.Aliases) > 0 {
|
||||
containerName = ref.Aliases[0]
|
||||
} else {
|
||||
containerName = ref.Name
|
||||
}
|
||||
|
||||
commonTags := map[string]string{
|
||||
tagMachineName: self.machineName,
|
||||
tagContainerName: containerName,
|
||||
}
|
||||
for i := 0; i < len(points); i++ {
|
||||
// merge with existing tags if any
|
||||
addTagsToPoint(points[i], commonTags)
|
||||
points[i].Time = stats.Timestamp
|
||||
}
|
||||
}
|
||||
|
||||
func (self *influxdbStorage) containerStatsToPoints(
|
||||
ref info.ContainerReference,
|
||||
stats *info.ContainerStats,
|
||||
) (columns []string, values []interface{}) {
|
||||
self.getSeriesDefaultValues(ref, stats, &columns, &values)
|
||||
// Cumulative Cpu Usage
|
||||
columns = append(columns, colCpuCumulativeUsage)
|
||||
values = append(values, stats.Cpu.Usage.Total)
|
||||
) (points []*influxdb.Point) {
|
||||
// CPU usage: Total usage in nanoseconds
|
||||
points = append(points, makePoint(serCpuUsageTotal, stats.Cpu.Usage.Total))
|
||||
|
||||
// CPU usage: Time spend in system space (in nanoseconds)
|
||||
points = append(points, makePoint(serCpuUsageSystem, stats.Cpu.Usage.System))
|
||||
|
||||
// CPU usage: Time spent in user space (in nanoseconds)
|
||||
points = append(points, makePoint(serCpuUsageUser, stats.Cpu.Usage.User))
|
||||
|
||||
// CPU usage per CPU
|
||||
for i := 0; i < len(stats.Cpu.Usage.PerCpu); i++ {
|
||||
point := makePoint(serCpuUsagePerCpu, stats.Cpu.Usage.PerCpu[i])
|
||||
tags := map[string]string{"instance": fmt.Sprintf("%v", i)}
|
||||
addTagsToPoint(point, tags)
|
||||
|
||||
points = append(points, point)
|
||||
}
|
||||
|
||||
// Load Average
|
||||
points = append(points, makePoint(serLoadAverage, stats.Cpu.LoadAverage))
|
||||
|
||||
// Memory Usage
|
||||
columns = append(columns, colMemoryUsage)
|
||||
values = append(values, stats.Memory.Usage)
|
||||
points = append(points, makePoint(serMemoryUsage, stats.Memory.Usage))
|
||||
|
||||
// Working set size
|
||||
columns = append(columns, colMemoryWorkingSet)
|
||||
values = append(values, stats.Memory.WorkingSet)
|
||||
// Working Set Size
|
||||
points = append(points, makePoint(serMemoryWorkingSet, stats.Memory.WorkingSet))
|
||||
|
||||
// Network stats.
|
||||
columns = append(columns, colRxBytes)
|
||||
values = append(values, stats.Network.RxBytes)
|
||||
// Network Stats
|
||||
points = append(points, makePoint(serRxBytes, stats.Network.RxBytes))
|
||||
points = append(points, makePoint(serRxErrors, stats.Network.RxErrors))
|
||||
points = append(points, makePoint(serTxBytes, stats.Network.TxBytes))
|
||||
points = append(points, makePoint(serTxErrors, stats.Network.TxErrors))
|
||||
|
||||
columns = append(columns, colRxErrors)
|
||||
values = append(values, stats.Network.RxErrors)
|
||||
self.tagPoints(ref, stats, points)
|
||||
|
||||
columns = append(columns, colTxBytes)
|
||||
values = append(values, stats.Network.TxBytes)
|
||||
|
||||
columns = append(columns, colTxErrors)
|
||||
values = append(values, stats.Network.TxErrors)
|
||||
|
||||
return columns, values
|
||||
return points
|
||||
}
|
||||
|
||||
func (self *influxdbStorage) OverrideReadyToFlush(readyToFlush func() bool) {
|
||||
@ -175,27 +220,38 @@ func (self *influxdbStorage) AddStats(ref info.ContainerReference, stats *info.C
|
||||
if stats == nil {
|
||||
return nil
|
||||
}
|
||||
var seriesToFlush []*influxdb.Series
|
||||
var pointsToFlush []*influxdb.Point
|
||||
func() {
|
||||
// AddStats will be invoked simultaneously from multiple threads and only one of them will perform a write.
|
||||
self.lock.Lock()
|
||||
defer self.lock.Unlock()
|
||||
|
||||
self.series = append(self.series, self.newSeries(self.containerStatsToValues(ref, stats)))
|
||||
self.series = append(self.series, self.containerFilesystemStatsToSeries(ref, stats)...)
|
||||
self.points = append(self.points, self.containerStatsToPoints(ref, stats)...)
|
||||
self.points = append(self.points, self.containerFilesystemStatsToPoints(ref, stats)...)
|
||||
if self.readyToFlush() {
|
||||
seriesToFlush = self.series
|
||||
self.series = make([]*influxdb.Series, 0)
|
||||
pointsToFlush = self.points
|
||||
self.points = make([]*influxdb.Point, 0)
|
||||
self.lastWrite = time.Now()
|
||||
}
|
||||
}()
|
||||
if len(seriesToFlush) > 0 {
|
||||
err := self.client.WriteSeriesWithTimePrecision(seriesToFlush, influxdb.Microsecond)
|
||||
if err != nil {
|
||||
if len(pointsToFlush) > 0 {
|
||||
points := make([]influxdb.Point, len(pointsToFlush))
|
||||
for i, p := range pointsToFlush {
|
||||
points[i] = *p
|
||||
}
|
||||
|
||||
batchTags := map[string]string{tagMachineName: self.machineName}
|
||||
bp := influxdb.BatchPoints{
|
||||
Points: points,
|
||||
Database: self.database,
|
||||
Tags: batchTags,
|
||||
Time: stats.Timestamp,
|
||||
}
|
||||
response, err := self.client.Write(bp)
|
||||
if err != nil || checkResponseForErrors(response) != nil {
|
||||
return fmt.Errorf("failed to write stats to influxDb - %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -204,21 +260,9 @@ func (self *influxdbStorage) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns a new influxdb series.
|
||||
func (self *influxdbStorage) newSeries(columns []string, points []interface{}) *influxdb.Series {
|
||||
out := &influxdb.Series{
|
||||
Name: self.tableName,
|
||||
Columns: columns,
|
||||
// There's only one point for each stats
|
||||
Points: make([][]interface{}, 1),
|
||||
}
|
||||
out.Points[0] = points
|
||||
return out
|
||||
}
|
||||
|
||||
// machineName: A unique identifier to identify the host that current cAdvisor
|
||||
// instance is running on.
|
||||
// influxdbHost: The host which runs influxdb.
|
||||
// influxdbHost: The host which runs influxdb (host:port)
|
||||
func newStorage(
|
||||
machineName,
|
||||
tablename,
|
||||
@ -229,28 +273,97 @@ func newStorage(
|
||||
isSecure bool,
|
||||
bufferDuration time.Duration,
|
||||
) (*influxdbStorage, error) {
|
||||
config := &influxdb.ClientConfig{
|
||||
Host: influxdbHost,
|
||||
Username: username,
|
||||
Password: password,
|
||||
Database: database,
|
||||
IsSecure: isSecure,
|
||||
url := &url.URL{
|
||||
Scheme: "http",
|
||||
Host: influxdbHost,
|
||||
}
|
||||
client, err := influxdb.NewClient(config)
|
||||
if isSecure {
|
||||
url.Scheme = "https"
|
||||
}
|
||||
|
||||
config := &influxdb.Config{
|
||||
URL: *url,
|
||||
Username: username,
|
||||
Password: password,
|
||||
UserAgent: fmt.Sprintf("%v/%v", "cAdvisor", version.Info["version"]),
|
||||
}
|
||||
client, err := influxdb.NewClient(*config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(monnand): With go 1.3, we cannot compress data now.
|
||||
client.DisableCompression()
|
||||
|
||||
ret := &influxdbStorage{
|
||||
client: client,
|
||||
machineName: machineName,
|
||||
tableName: tablename,
|
||||
database: database,
|
||||
bufferDuration: bufferDuration,
|
||||
lastWrite: time.Now(),
|
||||
series: make([]*influxdb.Series, 0),
|
||||
points: make([]*influxdb.Point, 0),
|
||||
}
|
||||
ret.readyToFlush = ret.defaultReadyToFlush
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// Creates a measurement point with a single value field
|
||||
func makePoint(name string, value interface{}) *influxdb.Point {
|
||||
fields := map[string]interface{}{
|
||||
fieldValue: toSignedIfUnsigned(value),
|
||||
}
|
||||
|
||||
return &influxdb.Point{
|
||||
Measurement: name,
|
||||
Fields: fields,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds additional tags to the existing tags of a point
|
||||
func addTagsToPoint(point *influxdb.Point, tags map[string]string) {
|
||||
if point.Tags == nil {
|
||||
point.Tags = tags
|
||||
} else {
|
||||
for k, v := range tags {
|
||||
point.Tags[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Checks response for possible errors
|
||||
func checkResponseForErrors(response *influxdb.Response) error {
|
||||
const msg = "failed to write stats to influxDb - %s"
|
||||
|
||||
if response != nil && response.Err != nil {
|
||||
return fmt.Errorf(msg, response.Err)
|
||||
}
|
||||
if response != nil && response.Results != nil {
|
||||
for _, result := range response.Results {
|
||||
if result.Err != nil {
|
||||
return fmt.Errorf(msg, result.Err)
|
||||
}
|
||||
if result.Series != nil {
|
||||
for _, row := range result.Series {
|
||||
if row.Err != nil {
|
||||
return fmt.Errorf(msg, row.Err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Some stats have type unsigned integer, but the InfluxDB client accepts only signed integers.
|
||||
func toSignedIfUnsigned(value interface{}) interface{} {
|
||||
switch v := value.(type) {
|
||||
case uint64:
|
||||
return int64(v)
|
||||
case uint32:
|
||||
return int32(v)
|
||||
case uint16:
|
||||
return int16(v)
|
||||
case uint8:
|
||||
return int8(v)
|
||||
case uint:
|
||||
return int(v)
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
2
Godeps/_workspace/src/github.com/google/cadvisor/version/VERSION
generated
vendored
2
Godeps/_workspace/src/github.com/google/cadvisor/version/VERSION
generated
vendored
@ -1 +1 @@
|
||||
0.20.1
|
||||
0.20.4
|
1
Godeps/_workspace/src/github.com/mistifyio/go-zfs/.gitignore
generated
vendored
Normal file
1
Godeps/_workspace/src/github.com/mistifyio/go-zfs/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
.vagrant
|
60
Godeps/_workspace/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
generated
vendored
Normal file
60
Godeps/_workspace/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
generated
vendored
Normal file
@ -0,0 +1,60 @@
|
||||
## How to Contribute ##
|
||||
|
||||
We always welcome contributions to help make `go-zfs` better. Please take a moment to read this document if you would like to contribute.
|
||||
|
||||
### Reporting issues ###
|
||||
|
||||
We use [Github issues](https://github.com/mistifyio/go-zfs/issues) to track bug reports, feature requests, and submitting pull requests.
|
||||
|
||||
If you find a bug:
|
||||
|
||||
* Use the GitHub issue search to check whether the bug has already been reported.
|
||||
* If the issue has been fixed, try to reproduce the issue using the latest `master` branch of the repository.
|
||||
* If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue, if possible. Also provide the steps taken to reproduce the bug.
|
||||
|
||||
### Pull requests ###
|
||||
|
||||
We welcome bug fixes, improvements, and new features. Before embarking on making significant changes, please open an issue and ask first so that you do not risk duplicating efforts or spending time working on something that may be out of scope. For minor items, just open a pull request.
|
||||
|
||||
[Fork the project](https://help.github.com/articles/fork-a-repo), clone your fork, and add the upstream to your remote:
|
||||
|
||||
$ git clone git@github.com:<your-username>/go-zfs.git
|
||||
$ cd go-zfs
|
||||
$ git remote add upstream https://github.com/mistifyio/go-zfs.git
|
||||
|
||||
If you need to pull new changes committed upstream:
|
||||
|
||||
$ git checkout master
|
||||
$ git fetch upstream
|
||||
$ git merge upstream/master
|
||||
|
||||
Don' work directly on master as this makes it harder to merge later. Create a feature branch for your fix or new feature:
|
||||
|
||||
$ git checkout -b <feature-branch-name>
|
||||
|
||||
Please try to commit your changes in logical chunks. Ideally, you should include the issue number in the commit message.
|
||||
|
||||
$ git commit -m "Issue #<issue-number> - <commit-message>"
|
||||
|
||||
Push your feature branch to your fork.
|
||||
|
||||
$ git push origin <feature-branch-name>
|
||||
|
||||
[Open a Pull Request](https://help.github.com/articles/using-pull-requests) against the upstream master branch. Please give your pull request a clear title and description and note which issue(s) your pull request fixes.
|
||||
|
||||
* All Go code should be formatted using [gofmt](http://golang.org/cmd/gofmt/).
|
||||
* Every exported function should have [documentation](http://blog.golang.org/godoc-documenting-go-code) and corresponding [tests](http://golang.org/doc/code.html#Testing).
|
||||
|
||||
**Important:** By submitting a patch, you agree to allow the project owners to license your work under the [Apache 2.0 License](./LICENSE).
|
||||
|
||||
### Go Tools ###
|
||||
For consistency and to catch minor issues for all of go code, please run the following:
|
||||
* goimports
|
||||
* go vet
|
||||
* golint
|
||||
* errcheck
|
||||
|
||||
Many editors can execute the above on save.
|
||||
|
||||
----
|
||||
Guidelines based on http://azkaban.github.io/contributing.html
|
201
Godeps/_workspace/src/github.com/mistifyio/go-zfs/LICENSE
generated
vendored
Normal file
201
Godeps/_workspace/src/github.com/mistifyio/go-zfs/LICENSE
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright (c) 2014, OmniTI Computer Consulting, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
54
Godeps/_workspace/src/github.com/mistifyio/go-zfs/README.md
generated
vendored
Normal file
54
Godeps/_workspace/src/github.com/mistifyio/go-zfs/README.md
generated
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
# Go Wrapper for ZFS #
|
||||
|
||||
Simple wrappers for ZFS command line tools.
|
||||
|
||||
[](https://godoc.org/github.com/mistifyio/go-zfs)
|
||||
|
||||
## Requirements ##
|
||||
|
||||
You need a working ZFS setup. To use on Ubuntu 14.04, setup ZFS:
|
||||
|
||||
sudo apt-get install python-software-properties
|
||||
sudo apt-add-repository ppa:zfs-native/stable
|
||||
sudo apt-get update
|
||||
sudo apt-get install ubuntu-zfs libzfs-dev
|
||||
|
||||
Developed using Go 1.3, but currently there isn't anything 1.3 specific. Don't use Ubuntu packages for Go, use http://golang.org/doc/install
|
||||
|
||||
Generally you need root privileges to use anything zfs related.
|
||||
|
||||
## Status ##
|
||||
|
||||
This has been only been tested on Ubuntu 14.04
|
||||
|
||||
In the future, we hope to work directly with libzfs.
|
||||
|
||||
# Hacking #
|
||||
|
||||
The tests have decent examples for most functions.
|
||||
|
||||
```go
|
||||
//assuming a zpool named test
|
||||
//error handling ommitted
|
||||
|
||||
|
||||
f, err := zfs.CreateFilesystem("test/snapshot-test", nil)
|
||||
ok(t, err)
|
||||
|
||||
s, err := f.Snapshot("test", nil)
|
||||
ok(t, err)
|
||||
|
||||
// snapshot is named "test/snapshot-test@test"
|
||||
|
||||
c, err := s.Clone("test/clone-test", nil)
|
||||
|
||||
err := c.Destroy()
|
||||
err := s.Destroy()
|
||||
err := f.Destroy()
|
||||
|
||||
```
|
||||
|
||||
# Contributing #
|
||||
|
||||
See the [contributing guidelines](./CONTRIBUTING.md)
|
||||
|
34
Godeps/_workspace/src/github.com/mistifyio/go-zfs/Vagrantfile
generated
vendored
Normal file
34
Godeps/_workspace/src/github.com/mistifyio/go-zfs/Vagrantfile
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
config.vm.box = "ubuntu/trusty64"
|
||||
config.ssh.forward_agent = true
|
||||
|
||||
config.vm.synced_folder ".", "/home/vagrant/go/src/github.com/mistifyio/go-zfs", create: true
|
||||
|
||||
config.vm.provision "shell", inline: <<EOF
|
||||
cat << END > /etc/profile.d/go.sh
|
||||
export GOPATH=\\$HOME/go
|
||||
export PATH=\\$GOPATH/bin:/usr/local/go/bin:\\$PATH
|
||||
END
|
||||
|
||||
chown -R vagrant /home/vagrant/go
|
||||
|
||||
apt-get update
|
||||
apt-get install -y software-properties-common curl
|
||||
apt-add-repository --yes ppa:zfs-native/stable
|
||||
apt-get update
|
||||
apt-get install -y ubuntu-zfs
|
||||
|
||||
cd /home/vagrant
|
||||
curl -z go1.3.3.linux-amd64.tar.gz -L -O https://storage.googleapis.com/golang/go1.3.3.linux-amd64.tar.gz
|
||||
tar -C /usr/local -zxf /home/vagrant/go1.3.3.linux-amd64.tar.gz
|
||||
|
||||
cat << END > /etc/sudoers.d/go
|
||||
Defaults env_keep += "GOPATH"
|
||||
END
|
||||
|
||||
EOF
|
||||
|
||||
end
|
18
Godeps/_workspace/src/github.com/mistifyio/go-zfs/error.go
generated
vendored
Normal file
18
Godeps/_workspace/src/github.com/mistifyio/go-zfs/error.go
generated
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
package zfs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error is an error which is returned when the `zfs` or `zpool` shell
|
||||
// commands return with a non-zero exit code.
|
||||
type Error struct {
|
||||
Err error
|
||||
Debug string
|
||||
Stderr string
|
||||
}
|
||||
|
||||
// Error returns the string representation of an Error.
|
||||
func (e Error) Error() string {
|
||||
return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr)
|
||||
}
|
323
Godeps/_workspace/src/github.com/mistifyio/go-zfs/utils.go
generated
vendored
Normal file
323
Godeps/_workspace/src/github.com/mistifyio/go-zfs/utils.go
generated
vendored
Normal file
@ -0,0 +1,323 @@
|
||||
package zfs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pborman/uuid"
|
||||
)
|
||||
|
||||
type command struct {
|
||||
Command string
|
||||
Stdin io.Reader
|
||||
Stdout io.Writer
|
||||
}
|
||||
|
||||
func (c *command) Run(arg ...string) ([][]string, error) {
|
||||
|
||||
cmd := exec.Command(c.Command, arg...)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
|
||||
if c.Stdout == nil {
|
||||
cmd.Stdout = &stdout
|
||||
} else {
|
||||
cmd.Stdout = c.Stdout
|
||||
}
|
||||
|
||||
if c.Stdin != nil {
|
||||
cmd.Stdin = c.Stdin
|
||||
|
||||
}
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
id := uuid.New()
|
||||
joinedArgs := strings.Join(cmd.Args, " ")
|
||||
|
||||
logger.Log([]string{"ID:" + id, "START", joinedArgs})
|
||||
err := cmd.Run()
|
||||
logger.Log([]string{"ID:" + id, "FINISH"})
|
||||
|
||||
if err != nil {
|
||||
return nil, &Error{
|
||||
Err: err,
|
||||
Debug: strings.Join([]string{cmd.Path, joinedArgs}, " "),
|
||||
Stderr: stderr.String(),
|
||||
}
|
||||
}
|
||||
|
||||
// assume if you passed in something for stdout, that you know what to do with it
|
||||
if c.Stdout != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
|
||||
//last line is always blank
|
||||
lines = lines[0 : len(lines)-1]
|
||||
output := make([][]string, len(lines))
|
||||
|
||||
for i, l := range lines {
|
||||
output[i] = strings.Fields(l)
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func setString(field *string, value string) {
|
||||
v := ""
|
||||
if value != "-" {
|
||||
v = value
|
||||
}
|
||||
*field = v
|
||||
}
|
||||
|
||||
func setUint(field *uint64, value string) error {
|
||||
var v uint64
|
||||
if value != "-" {
|
||||
var err error
|
||||
v, err = strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
*field = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ds *Dataset) parseLine(line []string) error {
|
||||
prop := line[1]
|
||||
val := line[2]
|
||||
|
||||
var err error
|
||||
|
||||
switch prop {
|
||||
case "available":
|
||||
err = setUint(&ds.Avail, val)
|
||||
case "compression":
|
||||
setString(&ds.Compression, val)
|
||||
case "mountpoint":
|
||||
setString(&ds.Mountpoint, val)
|
||||
case "quota":
|
||||
err = setUint(&ds.Quota, val)
|
||||
case "type":
|
||||
setString(&ds.Type, val)
|
||||
case "origin":
|
||||
setString(&ds.Origin, val)
|
||||
case "used":
|
||||
err = setUint(&ds.Used, val)
|
||||
case "volsize":
|
||||
err = setUint(&ds.Volsize, val)
|
||||
case "written":
|
||||
err = setUint(&ds.Written, val)
|
||||
case "logicalused":
|
||||
err = setUint(&ds.Logicalused, val)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
/*
|
||||
* from zfs diff`s escape function:
|
||||
*
|
||||
* Prints a file name out a character at a time. If the character is
|
||||
* not in the range of what we consider "printable" ASCII, display it
|
||||
* as an escaped 3-digit octal value. ASCII values less than a space
|
||||
* are all control characters and we declare the upper end as the
|
||||
* DELete character. This also is the last 7-bit ASCII character.
|
||||
* We choose to treat all 8-bit ASCII as not printable for this
|
||||
* application.
|
||||
*/
|
||||
func unescapeFilepath(path string) (string, error) {
|
||||
buf := make([]byte, 0, len(path))
|
||||
llen := len(path)
|
||||
for i := 0; i < llen; {
|
||||
if path[i] == '\\' {
|
||||
if llen < i+4 {
|
||||
return "", fmt.Errorf("Invalid octal code: too short")
|
||||
}
|
||||
octalCode := path[(i + 1):(i + 4)]
|
||||
val, err := strconv.ParseUint(octalCode, 8, 8)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Invalid octal code: %v", err)
|
||||
}
|
||||
buf = append(buf, byte(val))
|
||||
i += 4
|
||||
} else {
|
||||
buf = append(buf, path[i])
|
||||
i++
|
||||
}
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
var changeTypeMap = map[string]ChangeType{
|
||||
"-": Removed,
|
||||
"+": Created,
|
||||
"M": Modified,
|
||||
"R": Renamed,
|
||||
}
|
||||
var inodeTypeMap = map[string]InodeType{
|
||||
"B": BlockDevice,
|
||||
"C": CharacterDevice,
|
||||
"/": Directory,
|
||||
">": Door,
|
||||
"|": NamedPipe,
|
||||
"@": SymbolicLink,
|
||||
"P": EventPort,
|
||||
"=": Socket,
|
||||
"F": File,
|
||||
}
|
||||
|
||||
// matches (+1) or (-1)
|
||||
var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)")
|
||||
|
||||
func parseReferenceCount(field string) (int, error) {
|
||||
matches := referenceCountRegex.FindStringSubmatch(field)
|
||||
if matches == nil {
|
||||
return 0, fmt.Errorf("Regexp does not match")
|
||||
}
|
||||
return strconv.Atoi(matches[1])
|
||||
}
|
||||
|
||||
func parseInodeChange(line []string) (*InodeChange, error) {
|
||||
llen := len(line)
|
||||
if llen < 1 {
|
||||
return nil, fmt.Errorf("Empty line passed")
|
||||
}
|
||||
|
||||
changeType := changeTypeMap[line[0]]
|
||||
if changeType == 0 {
|
||||
return nil, fmt.Errorf("Unknown change type '%s'", line[0])
|
||||
}
|
||||
|
||||
switch changeType {
|
||||
case Renamed:
|
||||
if llen != 4 {
|
||||
return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen)
|
||||
}
|
||||
case Modified:
|
||||
if llen != 4 && llen != 3 {
|
||||
return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen)
|
||||
}
|
||||
default:
|
||||
if llen != 3 {
|
||||
return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen)
|
||||
}
|
||||
}
|
||||
|
||||
inodeType := inodeTypeMap[line[1]]
|
||||
if inodeType == 0 {
|
||||
return nil, fmt.Errorf("Unknown inode type '%s'", line[1])
|
||||
}
|
||||
|
||||
path, err := unescapeFilepath(line[2])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse filename: %v", err)
|
||||
}
|
||||
|
||||
var newPath string
|
||||
var referenceCount int
|
||||
switch changeType {
|
||||
case Renamed:
|
||||
newPath, err = unescapeFilepath(line[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse filename: %v", err)
|
||||
}
|
||||
case Modified:
|
||||
if llen == 4 {
|
||||
referenceCount, err = parseReferenceCount(line[3])
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse reference count: %v", err)
|
||||
}
|
||||
}
|
||||
default:
|
||||
newPath = ""
|
||||
}
|
||||
|
||||
return &InodeChange{
|
||||
Change: changeType,
|
||||
Type: inodeType,
|
||||
Path: path,
|
||||
NewPath: newPath,
|
||||
ReferenceCountChange: referenceCount,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// example input
|
||||
//M / /testpool/bar/
|
||||
//+ F /testpool/bar/hello.txt
|
||||
//M / /testpool/bar/hello.txt (+1)
|
||||
//M / /testpool/bar/hello-hardlink
|
||||
func parseInodeChanges(lines [][]string) ([]*InodeChange, error) {
|
||||
changes := make([]*InodeChange, len(lines))
|
||||
|
||||
for i, line := range lines {
|
||||
c, err := parseInodeChange(line)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line)
|
||||
}
|
||||
changes[i] = c
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
func listByType(t, filter string) ([]*Dataset, error) {
|
||||
args := []string{"get", "-rHp", "-t", t, "all"}
|
||||
if filter != "" {
|
||||
args = append(args, filter)
|
||||
}
|
||||
out, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var datasets []*Dataset
|
||||
|
||||
name := ""
|
||||
var ds *Dataset
|
||||
for _, line := range out {
|
||||
if name != line[0] {
|
||||
name = line[0]
|
||||
ds = &Dataset{Name: name}
|
||||
datasets = append(datasets, ds)
|
||||
}
|
||||
if err := ds.parseLine(line); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return datasets, nil
|
||||
}
|
||||
|
||||
func propsSlice(properties map[string]string) []string {
|
||||
args := make([]string, 0, len(properties)*3)
|
||||
for k, v := range properties {
|
||||
args = append(args, "-o")
|
||||
args = append(args, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
func (z *Zpool) parseLine(line []string) error {
|
||||
prop := line[1]
|
||||
val := line[2]
|
||||
|
||||
var err error
|
||||
|
||||
switch prop {
|
||||
case "health":
|
||||
setString(&z.Health, val)
|
||||
case "allocated":
|
||||
err = setUint(&z.Allocated, val)
|
||||
case "size":
|
||||
err = setUint(&z.Size, val)
|
||||
case "free":
|
||||
err = setUint(&z.Free, val)
|
||||
}
|
||||
return err
|
||||
}
|
390
Godeps/_workspace/src/github.com/mistifyio/go-zfs/zfs.go
generated
vendored
Normal file
390
Godeps/_workspace/src/github.com/mistifyio/go-zfs/zfs.go
generated
vendored
Normal file
@ -0,0 +1,390 @@
|
||||
// Package zfs provides wrappers around the ZFS command line tools.
|
||||
package zfs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ZFS dataset types, which can indicate if a dataset is a filesystem,
|
||||
// snapshot, or volume.
|
||||
const (
|
||||
DatasetFilesystem = "filesystem"
|
||||
DatasetSnapshot = "snapshot"
|
||||
DatasetVolume = "volume"
|
||||
)
|
||||
|
||||
// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot,
|
||||
// or volume. The Type struct member can be used to determine a dataset's type.
|
||||
//
|
||||
// The field definitions can be found in the ZFS manual:
|
||||
// http://www.freebsd.org/cgi/man.cgi?zfs(8).
|
||||
type Dataset struct {
|
||||
Name string
|
||||
Origin string
|
||||
Used uint64
|
||||
Avail uint64
|
||||
Mountpoint string
|
||||
Compression string
|
||||
Type string
|
||||
Written uint64
|
||||
Volsize uint64
|
||||
Usedbydataset uint64
|
||||
Logicalused uint64
|
||||
Quota uint64
|
||||
}
|
||||
|
||||
// InodeType is the type of inode as reported by Diff
|
||||
type InodeType int
|
||||
|
||||
// Types of Inodes
|
||||
const (
|
||||
_ = iota // 0 == unknown type
|
||||
BlockDevice InodeType = iota
|
||||
CharacterDevice
|
||||
Directory
|
||||
Door
|
||||
NamedPipe
|
||||
SymbolicLink
|
||||
EventPort
|
||||
Socket
|
||||
File
|
||||
)
|
||||
|
||||
// ChangeType is the type of inode change as reported by Diff
|
||||
type ChangeType int
|
||||
|
||||
// Types of Changes
|
||||
const (
|
||||
_ = iota // 0 == unknown type
|
||||
Removed ChangeType = iota
|
||||
Created
|
||||
Modified
|
||||
Renamed
|
||||
)
|
||||
|
||||
// DestroyFlag is the options flag passed to Destroy.
type DestroyFlag int

// Valid destroy options. These are bit flags and may be OR-ed together;
// each non-default flag maps to one zfs destroy command-line option
// (see Dataset.Destroy).
//
// The repeated `= 1 << iota` of the original was dropped: implicit
// repetition yields the exact same values (1, 2, 4, 8, 16) while typing
// every constant as DestroyFlag instead of untyped int.
const (
	DestroyDefault         DestroyFlag = 1 << iota // no extra options
	DestroyRecursive                               // -r
	DestroyRecursiveClones                         // -R
	DestroyDeferDeletion                           // -d
	DestroyForceUmount                             // -f
)
|
||||
|
||||
// InodeChange represents a single change line as reported by `zfs diff`
// (see Dataset.Diff).
type InodeChange struct {
	Change               ChangeType
	Type                 InodeType
	Path                 string
	NewPath              string // presumably only set for Renamed changes — see parseInodeChanges
	ReferenceCountChange int
}
|
||||
|
||||
// Logger can be used to log commands/actions.
type Logger interface {
	// Log receives the full command line (binary plus arguments) about
	// to be executed (see SetLogger).
	Log(cmd []string)
}

// defaultLogger discards all log output; it is installed until SetLogger
// is called with a real implementation.
type defaultLogger struct{}

// Log implements Logger as a no-op.
// (The original body was a lone bare `return`, which is redundant in a
// function without results — staticcheck S1023.)
func (*defaultLogger) Log(cmd []string) {}

// logger is the package-wide command logger; it is never nil (SetLogger
// ignores nil arguments).
var logger Logger = &defaultLogger{}
|
||||
|
||||
// SetLogger set a log handler to log all commands including arguments before
|
||||
// they are executed
|
||||
func SetLogger(l Logger) {
|
||||
if l != nil {
|
||||
logger = l
|
||||
}
|
||||
}
|
||||
|
||||
// zfs is a helper function to wrap typical calls to zfs.
|
||||
func zfs(arg ...string) ([][]string, error) {
|
||||
c := command{Command: "zfs"}
|
||||
return c.Run(arg...)
|
||||
}
|
||||
|
||||
// Datasets returns a slice of ZFS datasets, regardless of type.
|
||||
// A filter argument may be passed to select a dataset with the matching name,
|
||||
// or empty string ("") may be used to select all datasets.
|
||||
func Datasets(filter string) ([]*Dataset, error) {
|
||||
return listByType("all", filter)
|
||||
}
|
||||
|
||||
// Snapshots returns a slice of ZFS snapshots.
|
||||
// A filter argument may be passed to select a snapshot with the matching name,
|
||||
// or empty string ("") may be used to select all snapshots.
|
||||
func Snapshots(filter string) ([]*Dataset, error) {
|
||||
return listByType(DatasetSnapshot, filter)
|
||||
}
|
||||
|
||||
// Filesystems returns a slice of ZFS filesystems.
|
||||
// A filter argument may be passed to select a filesystem with the matching name,
|
||||
// or empty string ("") may be used to select all filesystems.
|
||||
func Filesystems(filter string) ([]*Dataset, error) {
|
||||
return listByType(DatasetFilesystem, filter)
|
||||
}
|
||||
|
||||
// Volumes returns a slice of ZFS volumes.
|
||||
// A filter argument may be passed to select a volume with the matching name,
|
||||
// or empty string ("") may be used to select all volumes.
|
||||
func Volumes(filter string) ([]*Dataset, error) {
|
||||
return listByType(DatasetVolume, filter)
|
||||
}
|
||||
|
||||
// GetDataset retrieves a single ZFS dataset by name. This dataset could be
|
||||
// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume.
|
||||
func GetDataset(name string) (*Dataset, error) {
|
||||
out, err := zfs("get", "-Hp", "all", name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ds := &Dataset{Name: name}
|
||||
for _, line := range out {
|
||||
if err := ds.parseLine(line); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
// Clone clones a ZFS snapshot and returns a clone dataset.
|
||||
// An error will be returned if the input dataset is not of snapshot type.
|
||||
func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) {
|
||||
if d.Type != DatasetSnapshot {
|
||||
return nil, errors.New("can only clone snapshots")
|
||||
}
|
||||
args := make([]string, 2, 4)
|
||||
args[0] = "clone"
|
||||
args[1] = "-p"
|
||||
if properties != nil {
|
||||
args = append(args, propsSlice(properties)...)
|
||||
}
|
||||
args = append(args, []string{d.Name, dest}...)
|
||||
_, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetDataset(dest)
|
||||
}
|
||||
|
||||
// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a
|
||||
// new snapshot with the specified name, and streams the input data into the
|
||||
// newly-created snapshot.
|
||||
func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) {
|
||||
c := command{Command: "zfs", Stdin: input}
|
||||
_, err := c.Run("receive", name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetDataset(name)
|
||||
}
|
||||
|
||||
// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer.
|
||||
// An error will be returned if the input dataset is not of snapshot type.
|
||||
func (d *Dataset) SendSnapshot(output io.Writer) error {
|
||||
if d.Type != DatasetSnapshot {
|
||||
return errors.New("can only send snapshots")
|
||||
}
|
||||
|
||||
c := command{Command: "zfs", Stdout: output}
|
||||
_, err := c.Run("send", d.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateVolume creates a new ZFS volume with the specified name, size, and
|
||||
// properties.
|
||||
// A full list of available ZFS properties may be found here:
|
||||
// https://www.freebsd.org/cgi/man.cgi?zfs(8).
|
||||
func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) {
|
||||
args := make([]string, 4, 5)
|
||||
args[0] = "create"
|
||||
args[1] = "-p"
|
||||
args[2] = "-V"
|
||||
args[3] = strconv.FormatUint(size, 10)
|
||||
if properties != nil {
|
||||
args = append(args, propsSlice(properties)...)
|
||||
}
|
||||
args = append(args, name)
|
||||
_, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetDataset(name)
|
||||
}
|
||||
|
||||
// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any
|
||||
// descendents of the dataset will be recursively destroyed, including snapshots.
|
||||
// If the deferred bit flag is set, the snapshot is marked for deferred
|
||||
// deletion.
|
||||
func (d *Dataset) Destroy(flags DestroyFlag) error {
|
||||
args := make([]string, 1, 3)
|
||||
args[0] = "destroy"
|
||||
if flags&DestroyRecursive != 0 {
|
||||
args = append(args, "-r")
|
||||
}
|
||||
|
||||
if flags&DestroyRecursiveClones != 0 {
|
||||
args = append(args, "-R")
|
||||
}
|
||||
|
||||
if flags&DestroyDeferDeletion != 0 {
|
||||
args = append(args, "-d")
|
||||
}
|
||||
|
||||
if flags&DestroyForceUmount != 0 {
|
||||
args = append(args, "-f")
|
||||
}
|
||||
|
||||
args = append(args, d.Name)
|
||||
_, err := zfs(args...)
|
||||
return err
|
||||
}
|
||||
|
||||
// SetProperty sets a ZFS property on the receiving dataset.
|
||||
// A full list of available ZFS properties may be found here:
|
||||
// https://www.freebsd.org/cgi/man.cgi?zfs(8).
|
||||
func (d *Dataset) SetProperty(key, val string) error {
|
||||
prop := strings.Join([]string{key, val}, "=")
|
||||
_, err := zfs("set", prop, d.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetProperty returns the current value of a ZFS property from the
|
||||
// receiving dataset.
|
||||
// A full list of available ZFS properties may be found here:
|
||||
// https://www.freebsd.org/cgi/man.cgi?zfs(8).
|
||||
func (d *Dataset) GetProperty(key string) (string, error) {
|
||||
out, err := zfs("get", key, d.Name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return out[0][2], nil
|
||||
}
|
||||
|
||||
// Snapshots returns a slice of all ZFS snapshots of a given dataset.
|
||||
func (d *Dataset) Snapshots() ([]*Dataset, error) {
|
||||
return Snapshots(d.Name)
|
||||
}
|
||||
|
||||
// CreateFilesystem creates a new ZFS filesystem with the specified name and
|
||||
// properties.
|
||||
// A full list of available ZFS properties may be found here:
|
||||
// https://www.freebsd.org/cgi/man.cgi?zfs(8).
|
||||
func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) {
|
||||
args := make([]string, 1, 4)
|
||||
args[0] = "create"
|
||||
|
||||
if properties != nil {
|
||||
args = append(args, propsSlice(properties)...)
|
||||
}
|
||||
|
||||
args = append(args, name)
|
||||
_, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetDataset(name)
|
||||
}
|
||||
|
||||
// Snapshot creates a new ZFS snapshot of the receiving dataset, using the
|
||||
// specified name. Optionally, the snapshot can be taken recursively, creating
|
||||
// snapshots of all descendent filesystems in a single, atomic operation.
|
||||
func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) {
|
||||
args := make([]string, 1, 4)
|
||||
args[0] = "snapshot"
|
||||
if recursive {
|
||||
args = append(args, "-r")
|
||||
}
|
||||
snapName := fmt.Sprintf("%s@%s", d.Name, name)
|
||||
args = append(args, snapName)
|
||||
_, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return GetDataset(snapName)
|
||||
}
|
||||
|
||||
// Rollback rolls back the receiving ZFS dataset to a previous snapshot.
|
||||
// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot
|
||||
// rollback cannot be completed without this option, if more recent
|
||||
// snapshots exist.
|
||||
// An error will be returned if the input dataset is not of snapshot type.
|
||||
func (d *Dataset) Rollback(destroyMoreRecent bool) error {
|
||||
if d.Type != DatasetSnapshot {
|
||||
return errors.New("can only rollback snapshots")
|
||||
}
|
||||
|
||||
args := make([]string, 1, 3)
|
||||
args[0] = "rollback"
|
||||
if destroyMoreRecent {
|
||||
args = append(args, "-r")
|
||||
}
|
||||
args = append(args, d.Name)
|
||||
|
||||
_, err := zfs(args...)
|
||||
return err
|
||||
}
|
||||
|
||||
// Children returns a slice of children of the receiving ZFS dataset.
|
||||
// A recursion depth may be specified, or a depth of 0 allows unlimited
|
||||
// recursion.
|
||||
func (d *Dataset) Children(depth uint64) ([]*Dataset, error) {
|
||||
args := []string{"get", "-t", "all", "-Hp", "all"}
|
||||
if depth > 0 {
|
||||
args = append(args, "-d")
|
||||
args = append(args, strconv.FormatUint(depth, 10))
|
||||
} else {
|
||||
args = append(args, "-r")
|
||||
}
|
||||
args = append(args, d.Name)
|
||||
|
||||
out, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var datasets []*Dataset
|
||||
name := ""
|
||||
var ds *Dataset
|
||||
for _, line := range out {
|
||||
if name != line[0] {
|
||||
name = line[0]
|
||||
ds = &Dataset{Name: name}
|
||||
datasets = append(datasets, ds)
|
||||
}
|
||||
if err := ds.parseLine(line); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return datasets[1:], nil
|
||||
}
|
||||
|
||||
// Diff returns changes between a snapshot and the given ZFS dataset.
|
||||
// The snapshot name must include the filesystem part as it is possible to
|
||||
// compare clones with their origin snapshots.
|
||||
func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) {
|
||||
args := []string{"diff", "-FH", snapshot, d.Name}[:]
|
||||
out, err := zfs(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
inodeChanges, err := parseInodeChanges(out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return inodeChanges, nil
|
||||
}
|
105
Godeps/_workspace/src/github.com/mistifyio/go-zfs/zpool.go
generated
vendored
Normal file
105
Godeps/_workspace/src/github.com/mistifyio/go-zfs/zpool.go
generated
vendored
Normal file
@ -0,0 +1,105 @@
|
||||
package zfs
|
||||
|
||||
// ZFS zpool states, which can indicate if a pool is online, offline,
// degraded, etc. These match the health strings printed by zpool(8).
// More information regarding zpool states can be found here:
// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html.
const (
	ZpoolOnline   = "ONLINE"
	ZpoolDegraded = "DEGRADED"
	ZpoolFaulted  = "FAULTED"
	ZpoolOffline  = "OFFLINE"
	ZpoolUnavail  = "UNAVAIL"
	ZpoolRemoved  = "REMOVED"
)
|
||||
|
||||
// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can
// contain many descendent datasets.
type Zpool struct {
	Name      string
	Health    string // one of the Zpool* state constants above
	Allocated uint64 // numeric values come from `zpool get all -p` — presumably raw bytes; see parseLine
	Size      uint64
	Free      uint64
}
|
||||
|
||||
// zpool is a helper function to wrap typical calls to zpool.
|
||||
func zpool(arg ...string) ([][]string, error) {
|
||||
c := command{Command: "zpool"}
|
||||
return c.Run(arg...)
|
||||
}
|
||||
|
||||
// GetZpool retrieves a single ZFS zpool by name.
|
||||
func GetZpool(name string) (*Zpool, error) {
|
||||
out, err := zpool("get", "all", "-p", name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// there is no -H
|
||||
out = out[1:]
|
||||
|
||||
z := &Zpool{Name: name}
|
||||
for _, line := range out {
|
||||
if err := z.parseLine(line); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return z, nil
|
||||
}
|
||||
|
||||
// Datasets returns a slice of all ZFS datasets in a zpool.
|
||||
func (z *Zpool) Datasets() ([]*Dataset, error) {
|
||||
return Datasets(z.Name)
|
||||
}
|
||||
|
||||
// Snapshots returns a slice of all ZFS snapshots in a zpool.
|
||||
func (z *Zpool) Snapshots() ([]*Dataset, error) {
|
||||
return Snapshots(z.Name)
|
||||
}
|
||||
|
||||
// CreateZpool creates a new ZFS zpool with the specified name, properties,
|
||||
// and optional arguments.
|
||||
// A full list of available ZFS properties and command-line arguments may be
|
||||
// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8).
|
||||
func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) {
|
||||
cli := make([]string, 1, 4)
|
||||
cli[0] = "create"
|
||||
if properties != nil {
|
||||
cli = append(cli, propsSlice(properties)...)
|
||||
}
|
||||
cli = append(cli, name)
|
||||
cli = append(cli, args...)
|
||||
_, err := zpool(cli...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Zpool{Name: name}, nil
|
||||
}
|
||||
|
||||
// Destroy destroys a ZFS zpool by name.
|
||||
func (z *Zpool) Destroy() error {
|
||||
_, err := zpool("destroy", z.Name)
|
||||
return err
|
||||
}
|
||||
|
||||
// ListZpools list all ZFS zpools accessible on the current system.
|
||||
func ListZpools() ([]*Zpool, error) {
|
||||
args := []string{"list", "-Ho", "name"}
|
||||
out, err := zpool(args...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var pools []*Zpool
|
||||
|
||||
for _, line := range out {
|
||||
z, err := GetZpool(line[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pools = append(pools, z)
|
||||
}
|
||||
return pools, nil
|
||||
}
|
@ -24,7 +24,7 @@ import (
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
var KnownKubeletMetrics = map[string][]string{
|
||||
var NecessaryKubeletMetrics = map[string][]string{
|
||||
"cadvisor_version_info": {"cadvisorRevision", "cadvisorVersion", "dockerVersion", "kernelVersion", "osVersion"},
|
||||
"container_cpu_system_seconds_total": {"id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name"},
|
||||
"container_cpu_usage_seconds_total": {"id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name", "cpu"},
|
||||
@ -43,6 +43,8 @@ var KnownKubeletMetrics = map[string][]string{
|
||||
"container_fs_writes_merged_total": {"device", "id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name"},
|
||||
"container_fs_writes_total": {"device", "id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name"},
|
||||
"container_last_seen": {"id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name"},
|
||||
"container_memory_cache": {},
|
||||
"container_memory_rss": {},
|
||||
"container_memory_failcnt": {"id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name"},
|
||||
"container_memory_failures_total": {"id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name", "scope", "type"},
|
||||
"container_memory_usage_bytes": {"id", "image", "kubernetes_container_name", "kubernetes_namespace", "kubernetes_pod_name", "name"},
|
||||
@ -116,15 +118,15 @@ func (m *KubeletMetrics) Equal(o KubeletMetrics) bool {
|
||||
|
||||
func NewKubeletMetrics() KubeletMetrics {
|
||||
result := NewMetrics()
|
||||
for metric := range KnownKubeletMetrics {
|
||||
for metric := range NecessaryKubeletMetrics {
|
||||
result[metric] = make(model.Samples, 0)
|
||||
}
|
||||
return KubeletMetrics(result)
|
||||
}
|
||||
|
||||
func parseKubeletMetrics(data string, unknownMetrics sets.String) (KubeletMetrics, error) {
|
||||
func parseKubeletMetrics(data string) (KubeletMetrics, error) {
|
||||
result := NewKubeletMetrics()
|
||||
if err := parseMetrics(data, KnownKubeletMetrics, (*Metrics)(&result), unknownMetrics); err != nil {
|
||||
if err := parseMetrics(data, NecessaryKubeletMetrics, (*Metrics)(&result), nil); err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return result, nil
|
||||
|
@ -85,7 +85,7 @@ func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controll
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromKubelet(nodeName string, unknownMetrics sets.String) (KubeletMetrics, error) {
|
||||
func (g *MetricsGrabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
|
||||
nodes, err := g.client.Nodes().List(api.ListOptions{FieldSelector: fields.Set{client.ObjectNameField: nodeName}.AsSelector()})
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
@ -94,10 +94,10 @@ func (g *MetricsGrabber) GrabFromKubelet(nodeName string, unknownMetrics sets.St
|
||||
return KubeletMetrics{}, fmt.Errorf("Error listing nodes with name %v, got %v", nodeName, nodes.Items)
|
||||
}
|
||||
kubeletPort := nodes.Items[0].Status.DaemonEndpoints.KubeletEndpoint.Port
|
||||
return g.grabFromKubeletInternal(nodeName, kubeletPort, unknownMetrics)
|
||||
return g.grabFromKubeletInternal(nodeName, kubeletPort)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) grabFromKubeletInternal(nodeName string, kubeletPort int, unknownMetrics sets.String) (KubeletMetrics, error) {
|
||||
func (g *MetricsGrabber) grabFromKubeletInternal(nodeName string, kubeletPort int) (KubeletMetrics, error) {
|
||||
if kubeletPort <= 0 || kubeletPort > 65535 {
|
||||
return KubeletMetrics{}, fmt.Errorf("Invalid Kubelet port %v. Skipping Kubelet's metrics gathering.", kubeletPort)
|
||||
}
|
||||
@ -105,7 +105,7 @@ func (g *MetricsGrabber) grabFromKubeletInternal(nodeName string, kubeletPort in
|
||||
if err != nil {
|
||||
return KubeletMetrics{}, err
|
||||
}
|
||||
return parseKubeletMetrics(output, unknownMetrics)
|
||||
return parseKubeletMetrics(output)
|
||||
}
|
||||
|
||||
func (g *MetricsGrabber) GrabFromScheduler(unknownMetrics sets.String) (SchedulerMetrics, error) {
|
||||
@ -173,7 +173,7 @@ func (g *MetricsGrabber) Grab(unknownMetrics sets.String) (MetricsCollection, er
|
||||
} else {
|
||||
for _, node := range nodes.Items {
|
||||
kubeletPort := node.Status.DaemonEndpoints.KubeletEndpoint.Port
|
||||
metrics, err := g.grabFromKubeletInternal(node.Name, kubeletPort, nil)
|
||||
metrics, err := g.grabFromKubeletInternal(node.Name, kubeletPort)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ func validateLabelSet(labelSet map[string][]string, data metrics.Metrics, invali
|
||||
invalidLabel = false
|
||||
}
|
||||
}
|
||||
if invalidLabel {
|
||||
if invalidLabel && invalidLabels != nil {
|
||||
if _, ok := invalidLabels[metric]; !ok {
|
||||
invalidLabels[metric] = sets.NewString()
|
||||
}
|
||||
@ -59,13 +59,21 @@ func validateLabelSet(labelSet map[string][]string, data metrics.Metrics, invali
|
||||
}
|
||||
}
|
||||
|
||||
// checkNecessaryMetrics asserts that the response contains every
// metric/label pair listed in necessaryMetrics (plus the common metrics).
// Passing nil for the invalid-labels collector means extra, unknown
// metrics are tolerated; only missing labels fail the expectation.
func checkNecessaryMetrics(response metrics.Metrics, necessaryMetrics map[string][]string) {
	missingLabels := make(map[string]sets.String)
	validateLabelSet(metrics.CommonMetrics, response, nil, missingLabels)
	validateLabelSet(necessaryMetrics, response, nil, missingLabels)

	Expect(missingLabels).To(BeEmpty())
}
|
||||
|
||||
func checkMetrics(response metrics.Metrics, assumedMetrics map[string][]string) {
|
||||
invalidLabels := make(map[string]sets.String)
|
||||
unknownLabels := make(map[string]sets.String)
|
||||
validateLabelSet(metrics.CommonMetrics, response, invalidLabels, unknownLabels)
|
||||
validateLabelSet(assumedMetrics, response, invalidLabels, unknownLabels)
|
||||
missingLabels := make(map[string]sets.String)
|
||||
validateLabelSet(metrics.CommonMetrics, response, invalidLabels, missingLabels)
|
||||
validateLabelSet(assumedMetrics, response, invalidLabels, missingLabels)
|
||||
|
||||
Expect(unknownLabels).To(BeEmpty())
|
||||
Expect(missingLabels).To(BeEmpty())
|
||||
Expect(invalidLabels).To(BeEmpty())
|
||||
}
|
||||
|
||||
@ -97,12 +105,9 @@ var _ = Describe("MetricsGrabber", func() {
|
||||
By("Connecting proxying to Node through the API server")
|
||||
nodes := ListSchedulableNodesOrDie(c)
|
||||
Expect(nodes.Items).NotTo(BeEmpty())
|
||||
unknownMetrics := sets.NewString()
|
||||
response, err := grabber.GrabFromKubelet(nodes.Items[0].Name, unknownMetrics)
|
||||
response, err := grabber.GrabFromKubelet(nodes.Items[0].Name)
|
||||
expectNoError(err)
|
||||
Expect(unknownMetrics).To(BeEmpty())
|
||||
|
||||
checkMetrics(metrics.Metrics(response), metrics.KnownKubeletMetrics)
|
||||
checkNecessaryMetrics(metrics.Metrics(response), metrics.NecessaryKubeletMetrics)
|
||||
}
|
||||
})
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user