Merge branch 'master' into correct-deprecation-errors

Commit c9c8d61048 by Alexander Campbell, 2017-05-22 09:55:07 -07:00
320 changed files with 26504 additions and 124041 deletions

Godeps/Godeps.json (generated, 32 changed lines)

@@ -1219,6 +1219,22 @@
"ImportPath": "github.com/golang/protobuf/proto",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/golang/protobuf/ptypes",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/golang/protobuf/ptypes/any",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/golang/protobuf/ptypes/duration",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/golang/protobuf/ptypes/timestamp",
"Rev": "4bd1920723d7b7c925de087aa32e2187708897f7"
},
{
"ImportPath": "github.com/google/btree",
"Rev": "7d79101e329e5a3adf994758c578dab82b90c017"
@@ -1452,6 +1468,18 @@
"ImportPath": "github.com/google/gofuzz",
"Rev": "44d81051d367757e1c7c6a5a86423ece9afcf63c"
},
{
"ImportPath": "github.com/googleapis/gnostic/OpenAPIv2",
"Rev": "68f4ded48ba9414dab2ae69b3f0d69971da73aa5"
},
{
"ImportPath": "github.com/googleapis/gnostic/compiler",
"Rev": "68f4ded48ba9414dab2ae69b3f0d69971da73aa5"
},
{
"ImportPath": "github.com/googleapis/gnostic/extensions",
"Rev": "68f4ded48ba9414dab2ae69b3f0d69971da73aa5"
},
{
"ImportPath": "github.com/gophercloud/gophercloud",
"Rev": "b06120d13e262ceaf890ef38ee30898813696af0"
@@ -2700,10 +2728,6 @@
"ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/compute/v0.alpha",
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
},
{
"ImportPath": "google.golang.org/api/compute/v0.beta",
"Rev": "e3824ed33c72bf7e81da0286772c34b987520914"

Godeps/LICENSES (generated, 824 changed lines)

@@ -39399,6 +39399,162 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/golang/protobuf/ptypes licensed under: =
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 -
================================================================================
================================================================================
= vendor/github.com/golang/protobuf/ptypes/any licensed under: =
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 -
================================================================================
================================================================================
= vendor/github.com/golang/protobuf/ptypes/duration licensed under: =
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 -
================================================================================
================================================================================
= vendor/github.com/golang/protobuf/ptypes/timestamp licensed under: =
Go support for Protocol Buffers - Google's data interchange format
Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/github.com/golang/protobuf/LICENSE 14db3a56c3796a940ba32948a15f97d0 -
================================================================================
================================================================================
= vendor/github.com/google/btree licensed under: =
@@ -48987,6 +49143,639 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/github.com/googleapis/gnostic/compiler licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/googleapis/gnostic/LICENSE b1e01b26bacfc2232046c90a330332b3 -
================================================================================
================================================================================
= vendor/github.com/googleapis/gnostic/extensions licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/googleapis/gnostic/LICENSE b1e01b26bacfc2232046c90a330332b3 -
================================================================================
================================================================================
= vendor/github.com/googleapis/gnostic/OpenAPIv2 licensed under: =
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
= vendor/github.com/googleapis/gnostic/LICENSE b1e01b26bacfc2232046c90a330332b3 -
================================================================================
================================================================================
= vendor/github.com/gophercloud/gophercloud licensed under: =
@@ -81788,41 +82577,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================================
================================================================================
= vendor/google.golang.org/api/compute/v0.alpha licensed under: =
Copyright (c) 2011 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
= vendor/google.golang.org/api/LICENSE a651bb3d8b1c412632e28823bb432b40 -
================================================================================
================================================================================
= vendor/google.golang.org/api/compute/v0.beta licensed under: =


@@ -42972,6 +42972,10 @@
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"type": "string"
},
"kind": {
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
"type": "string"
},
"readOnly": {
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
"type": "boolean"


@@ -3610,6 +3610,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@@ -3617,6 +3621,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@@ -2345,6 +2345,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@@ -2352,6 +2356,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@@ -3426,6 +3426,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@@ -3433,6 +3437,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@@ -8065,6 +8065,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@@ -8072,6 +8076,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@@ -2172,6 +2172,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@@ -2179,6 +2183,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@@ -19432,6 +19432,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@@ -19439,6 +19443,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@@ -1,11 +1,17 @@
# Calico Policy Controller
==============
Calico Policy Controller is an implementation of the Kubernetes network policy API.
Calico is an implementation of the Kubernetes network policy API. The provided manifest installs a DaemonSet which runs Calico on each node in the cluster.
Learn more at:
- https://github.com/projectcalico/k8s-policy
- http://kubernetes.io/docs/user-guide/networkpolicies/
### Templating
The provided `calico-node.yaml` manifest includes the following placeholders which are populated
via templating.
- `__CLUSTER_CIDR__`: The IP range from which Pod IP addresses are assigned.
### Learn More
Learn more about Calico at http://docs.projectcalico.org
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/calico-policy-controller/README.md?pixel)]()
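As a rough illustration of the templating step the README describes (the GCE path does exactly this in configure-helper.sh further down in this diff), a sketch with an example CIDR; the value is illustrative only:

# Illustrative only: fill the __CLUSTER_CIDR__ placeholder before applying the manifest.
CLUSTER_IP_RANGE="10.64.0.0/14"   # example value; use your cluster's Pod CIDR
sed -i -e "s@__CLUSTER_CIDR__@${CLUSTER_IP_RANGE}@g" calico-node.yaml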


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: calico-etcd
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
name: calico-etcd
namespace: kube-system
spec:
clusterIP: 10.0.0.17
ports:
- port: 6666
selector:
k8s-app: calico-etcd


@@ -1,42 +0,0 @@
apiVersion: "apps/v1beta1"
kind: StatefulSet
metadata:
name: calico-etcd
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: calico-etcd
spec:
serviceName: calico-etcd
replicas: 1
template:
metadata:
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-etcd
spec:
hostNetwork: true
containers:
- name: calico-etcd
image: gcr.io/google_containers/etcd:2.2.1
env:
- name: CALICO_ETCD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command: ["/bin/sh","-c"]
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
volumeMounts:
- name: var-etcd
mountPath: /var/etcd
volumeClaimTemplates:
- metadata:
name: var-etcd
annotations:
volume.alpha.kubernetes.io/storage-class: anything
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi


@@ -0,0 +1,136 @@
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: |
[{"key": "dedicated", "value": "master", "effect": "NoSchedule" },
{"key":"CriticalAddonsOnly", "operator":"Exists"}]
spec:
nodeSelector:
projectcalico.org/ds-ready: "true"
hostNetwork: true
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v1.2.1
env:
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: CALICO_NETWORKING_BACKEND
value: "none"
- name: CALICO_IPV4POOL_CIDR
value: "__CLUSTER_CIDR__"
- name: CALICO_IPV4POOL_IPIP
value: "off"
- name: DATASTORE_TYPE
value: "kubernetes"
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
- name: FELIX_IPV6SUPPORT
value: "false"
- name: FELIX_LOGSEVERITYSYS
value: "none"
- name: FELIX_PROMETHEUSMETRICSENABLED
value: "true"
- name: IP
value: ""
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: WAIT_FOR_DATASTORE
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /etc/calico
name: etc-calico
readOnly: true
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v1.8.3-hostport
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
value: "10-calico.conflist"
- name: CNI_NETWORK_CONFIG
value: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico",
"log_level": "debug",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"capabilities": {"portMappings": true},
"noSnat": true
}
]
}
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used to ensure proper kmods are installed.
- name: lib-modules
hostPath:
path: /lib/modules
# Mount in the Felix config file from the host.
- name: etc-calico
hostPath:
path: /etc/calico
# Used to install CNI binaries.
- name: cni-bin-dir
hostPath:
path: /home/kubernetes/bin
# Used to install CNI network config.
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
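The remaining __…__ tokens in CNI_NETWORK_CONFIG are left for the install-cni container to fill at install time. A rough, assumed sketch of that substitution follows; it is not the actual install-cni.sh from the calico/cni image, and the kubeconfig path is illustrative.

# Assumed behavior, for illustration: render CNI_NETWORK_CONFIG into the host's CNI config dir.
TMP_CONF="/tmp/${CNI_CONF_NAME}"
echo "${CNI_NETWORK_CONFIG}" > "${TMP_CONF}"
SERVICEACCOUNT_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
sed -i \
  -e "s/__KUBERNETES_NODE_NAME__/${KUBERNETES_NODE_NAME}/g" \
  -e "s/__SERVICEACCOUNT_TOKEN__/${SERVICEACCOUNT_TOKEN}/g" \
  -e "s/__KUBERNETES_SERVICE_HOST__/${KUBERNETES_SERVICE_HOST}/g" \
  -e "s/__KUBERNETES_SERVICE_PORT__/${KUBERNETES_SERVICE_PORT}/g" \
  -e "s%__KUBECONFIG_FILEPATH__%/etc/cni/net.d/calico-kubeconfig%g" \
  "${TMP_CONF}"
mv "${TMP_CONF}" "/host/etc/cni/net.d/${CNI_CONF_NAME}"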


@@ -1,32 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
k8s-app: calico-policy
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-policy
spec:
hostNetwork: true
containers:
- name: calico-policy-controller
image: calico/kube-policy-controller:v0.2.0
env:
- name: ETCD_ENDPOINTS
value: "http://10.0.0.17:6666"
- name: K8S_API
value: "https://kubernetes.default:443"
- name: CONFIGURE_ETC_HOSTS
value: "true"


@@ -415,7 +415,7 @@ data:
</store>
</match>
metadata:
name: fluentd-gcp-config
name: fluentd-gcp-config-v1.0
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile


@@ -118,7 +118,7 @@ spec:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-gcp-config
name: fluentd-gcp-config-v1.0
- name: ssl-certs
hostPath:
path: /etc/ssl/certs


@@ -124,6 +124,12 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
NODE_LABELS="$NODE_LABELS,projectcalico.org/ds-ready=true"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
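The new label is what the calico-node DaemonSet's nodeSelector (projectcalico.org/ds-ready: "true", see the manifest earlier in this diff) keys on. Outside this GCE start-up path the same gating could be done by hand; a sketch with a hypothetical node name:

# Hypothetical node name; the label key/value come from the config fragment above.
kubectl label node example-node-1 projectcalico.org/ds-ready=true
# Nodes carrying the label are the only ones the calico-node DaemonSet schedules onto.
kubectl get nodes -l projectcalico.org/ds-ready=true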


@@ -167,6 +167,12 @@ KUBEPROXY_TEST_ARGS="${KUBEPROXY_TEST_ARGS:-} ${TEST_CLUSTER_API_CONTENT_TYPE}"
# TODO(piosz): remove this in 1.8
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
NODE_LABELS="$NODE_LABELS,projectcalico.org/ds-ready=true"
fi
# Optional: Enable node logging.
ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp


@@ -1200,6 +1200,10 @@ function start-kube-addons {
fi
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
# Replace the cluster cidr.
local -r calico_file="${dst_dir}/calico-policy-controller/calico-node.yaml"
sed -i -e "s@__CLUSTER_CIDR__@${CLUSTER_IP_RANGE}@g" "${calico_file}"
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"


@ -702,13 +702,19 @@ function start-kubelet {
flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=${CA_CERT_BUNDLE_PATH}"
fi
# Network plugin
if [[ -n "${NETWORK_PROVIDER:-}" ]]; then
if [[ "${NETWORK_PROVIDER:-}" == "cni" ]]; then
if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
if [[ "${NETWORK_PROVIDER:-}" == "cni" || "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
flags+=" --cni-bin-dir=/home/kubernetes/bin"
else
flags+=" --network-plugin-dir=/home/kubernetes/bin"
fi
flags+=" --network-plugin=${NETWORK_PROVIDER}"
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
# Calico uses CNI always.
flags+=" --network-plugin=cni"
else
# Otherwise use the configured value.
flags+=" --network-plugin=${NETWORK_PROVIDER}"
fi
fi
if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
@ -1420,6 +1426,10 @@ function start-kube-addons {
fi
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
setup-addon-manifests "addons" "calico-policy-controller"
# Replace the cluster cidr.
local -r calico_file="${dst_dir}/calico-policy-controller/calico-node.yaml"
sed -i -e "s@__CLUSTER_CIDR__@${CLUSTER_IP_RANGE}@g" "${calico_file}"
fi
if [[ "${ENABLE_DEFAULT_STORAGE_CLASS:-}" == "true" ]]; then
setup-addon-manifests "addons" "storage-class/gce"


@ -1,16 +0,0 @@
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_authority": "10.0.0.17:6666",
"log_level": "info",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "/var/lib/kubelet/kubeconfig"
}
}


@ -1,40 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: calico-node
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
k8s-app: calico-node
spec:
hostNetwork: true
containers:
- name: calico-node
image: quay.io/calico/node:v0.20.0
env:
- name: ETCD_ENDPOINTS
value: "http://10.0.0.17:6666"
- name: CALICO_NETWORKING
value: "false"
securityContext:
privileged: true
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/log/calico
name: var-log-calico
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
volumes:
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-log-calico
hostPath:
path: /var/log/calico


@ -1,19 +0,0 @@
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
calico-policy-controller:
file.managed:
- name: /etc/kubernetes/manifests/calico-policy-controller.manifest
- source: salt://calico/calico-policy-controller.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- context:
cpurequest: '20m'
- require:
- service: docker
- service: kubelet
{% endif -%}


@ -1,37 +1,5 @@
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
calico-node:
file.managed:
- name: /etc/kubernetes/manifests/calico-node.manifest
- source: salt://calico/calico-node.manifest
- template: jinja
- user: root
- group: root
- mode: 644
- makedirs: true
- dir_mode: 755
- require:
- kmod: ip6_tables
- kmod: xt_set
- service: docker
- service: kubelet
calico-cni:
file.managed:
- name: /opt/cni/bin/calico
- source: https://github.com/projectcalico/calico-cni/releases/download/v1.3.1/calico
- source_hash: sha256=ac05cb9254b5aaa5822cf10325983431bd25489147f2edf9dec7e43d99c43e77
- makedirs: True
- mode: 744
calico-cni-config:
file.managed:
- name: /etc/cni/net.d/10-calico.conf
- source: salt://calico/10-calico.conf
- makedirs: True
- mode: 644
- template: jinja
ip6_tables:
kmod.present


@ -140,7 +140,7 @@
{% elif pillar.get('network_provider', '').lower() == 'cni' %}
{% set network_plugin = "--network-plugin=cni --network-plugin-dir=/etc/cni/net.d/" %}
{%elif pillar.get('network_policy_provider', '').lower() == 'calico' and grains['roles'][0] != 'kubernetes-master' -%}
{% set network_plugin = "--network-plugin=cni --network-plugin-dir=/etc/cni/net.d/" %}
{% set network_plugin = "--network-plugin=cni --network-plugin-dir=/etc/cni/net.d/ --cni-bin-dir=/home/kubernetes/bin/" %}
{% elif pillar.get('network_provider', '').lower() == 'kubenet' %}
{% set network_plugin = "--network-plugin=kubenet" -%}
{% endif -%}


@ -84,6 +84,3 @@ base:
{% if pillar.get('enable_rescheduler', '').lower() == 'true' %}
- rescheduler
{% endif %}
{% if pillar.get('network_policy_provider', '').lower() == 'calico' %}
- calico.master
{% endif %}


@ -119,8 +119,7 @@ func (hk *HyperKube) Run(args []string) error {
// If we are called directly, parse all flags up to the first real
// argument. That should be the server to run.
command := args[0]
baseCommand := path.Base(command)
serverName := baseCommand
serverName := path.Base(command)
args = args[1:]
if serverName == hk.Name {
@ -144,7 +143,6 @@ func (hk *HyperKube) Run(args []string) error {
args = baseFlags.Args()
if len(args) > 0 && len(args[0]) > 0 {
serverName = args[0]
baseCommand = baseCommand + " " + serverName
args = args[1:]
} else {
err = errors.New("no server specified")
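For reference, path.Base keeps only the last element of the invocation path, which is how a hyperkube binary invoked through a symlink named after a server selects that server to run. A minimal sketch (the path below is only an example):

package main

import (
	"fmt"
	"path"
)

func main() {
	// path.Base strips the directory portion, so invoking hyperkube through a
	// symlink such as /usr/local/bin/kube-apiserver yields "kube-apiserver".
	fmt.Println(path.Base("/usr/local/bin/kube-apiserver"))
}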


@ -38,6 +38,7 @@ go_library(
"//pkg/master:go_default_library",
"//pkg/master/thirdparty:go_default_library",
"//pkg/master/tunneler:go_default_library",
"//pkg/quota/install:go_default_library",
"//pkg/registry/cachesize:go_default_library",
"//pkg/registry/rbac/rest:go_default_library",
"//pkg/version:go_default_library",


@ -60,6 +60,9 @@ func (options *ServerRunOptions) Validate() []error {
if errs := options.SecureServing.Validate(); len(errs) > 0 {
errors = append(errors, errs...)
}
if errs := options.Authentication.Validate(); len(errs) > 0 {
errors = append(errors, errs...)
}
if errs := options.InsecureServing.Validate("insecure-port"); len(errs) > 0 {
errors = append(errors, errs...)
}


@ -71,6 +71,7 @@ import (
kubeserver "k8s.io/kubernetes/pkg/kubeapiserver/server"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/master/tunneler"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/registry/cachesize"
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
"k8s.io/kubernetes/pkg/version"
@ -392,7 +393,12 @@ func BuildAdmissionPluginInitializer(s *options.ServerRunOptions, client interna
// TODO: use a dynamic restmapper. See https://github.com/kubernetes/kubernetes/pull/42615.
restMapper := api.Registry.RESTMapper()
pluginInitializer := kubeapiserveradmission.NewPluginInitializer(client, sharedInformers, apiAuthorizer, cloudConfig, restMapper)
// NOTE: we do not provide informers to the quota registry because admission level decisions
// do not require us to open watches for all items tracked by quota.
quotaRegistry := quotainstall.NewRegistry(nil, nil)
pluginInitializer := kubeapiserveradmission.NewPluginInitializer(client, sharedInformers, apiAuthorizer, cloudConfig, restMapper, quotaRegistry)
return pluginInitializer, nil
}


@ -397,14 +397,20 @@ func StartControllers(controllers map[string]InitFunc, s *options.CMServer, root
rootCA = rootClientBuilder.ConfigOrDie("tokens-controller").CAData
}
go serviceaccountcontroller.NewTokensController(
controller := serviceaccountcontroller.NewTokensController(
sharedInformers.Core().V1().ServiceAccounts(),
sharedInformers.Core().V1().Secrets(),
rootClientBuilder.ClientOrDie("tokens-controller"),
serviceaccountcontroller.TokensControllerOptions{
TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
RootCA: rootCA,
},
).Run(int(s.ConcurrentSATokenSyncs), stop)
)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
go controller.Run(int(s.ConcurrentSATokenSyncs), stop)
// start the first set of informers now so that other controllers can start
sharedInformers.Start(stop)
}
} else {
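For context on the start-up stagger introduced above, wait.Jitter returns a duration between the base value and base*(1+maxFactor). A minimal sketch, assuming only k8s.io/apimachinery/pkg/util/wait:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	base := 100 * time.Millisecond
	// Jitter picks a random duration in [base, base*(1+maxFactor)); the
	// controller manager uses this to stagger controller start-up.
	fmt.Println(wait.Jitter(base, 1.0))
}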


@ -29,7 +29,6 @@ go_library(
"//cmd/kube-proxy/app:go_default_library",
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/version/prometheus:go_default_library",
"//pkg/version/verflag:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//vendor/k8s.io/apiserver/pkg/util/logs:go_default_library",


@ -40,6 +40,7 @@ go_library(
"//pkg/util/oom:go_default_library",
"//pkg/util/resourcecontainer:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//pkg/version/verflag:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",


@ -68,6 +68,7 @@ import (
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/resourcecontainer"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
"k8s.io/kubernetes/pkg/version/verflag"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
@ -333,6 +334,7 @@ environment variables specifying ports opened by the service proxy. There is an
addon that provides cluster DNS for these cluster IPs. The user must create a service
with the apiserver API to configure the proxy.`,
Run: func(cmd *cobra.Command, args []string) {
verflag.PrintAndExitIfRequested()
cmdutil.CheckErr(opts.Complete())
cmdutil.CheckErr(opts.Validate(args))
cmdutil.CheckErr(opts.Run())


@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/cmd/kube-proxy/app"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
"k8s.io/kubernetes/pkg/version/verflag"
)
func main() {
@ -42,8 +41,6 @@ func main() {
logs.InitLogs()
defer logs.FlushLogs()
verflag.PrintAndExitIfRequested()
if err := command.Execute(); err != nil {
os.Exit(1)
}


@ -90,6 +90,7 @@ func New() *Generator {
`k8s.io/kubernetes/pkg/apis/settings/v1alpha1`,
`k8s.io/kubernetes/pkg/apis/storage/v1beta1`,
`k8s.io/kubernetes/pkg/apis/storage/v1`,
`k8s.io/kubernetes/pkg/apis/admission/v1alpha1`,
}, ","),
DropEmbeddedFields: "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta",
}


@ -2650,6 +2650,13 @@ When an object is created, the system will populate this list with the current s
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -6333,6 +6340,10 @@ Examples:<br>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_preferredschedulingterm">v1.PreferredSchedulingTerm</h3>
@ -6438,7 +6449,7 @@ Examples:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:24:34 UTC
Last updated 2017-05-18 19:28:33 UTC
</div>
</div>
</body>


@ -1483,6 +1483,10 @@ When an object is created, the system will populate this list with the current s
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_types_uid">types.UID</h3>
</div>
<div class="sect2">
<h3 id="_v1_azurefilevolumesource">v1.AzureFileVolumeSource</h3>
@ -1531,10 +1535,6 @@ When an object is created, the system will populate this list with the current s
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_types_uid">types.UID</h3>
</div>
<div class="sect2">
<h3 id="_v1_iscsivolumesource">v1.ISCSIVolumeSource</h3>
@ -2229,6 +2229,13 @@ When an object is created, the system will populate this list with the current s
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -5575,6 +5582,10 @@ Examples:<br>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_preferredschedulingterm">v1.PreferredSchedulingTerm</h3>
@ -5680,7 +5691,7 @@ Examples:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:25:06 UTC
Last updated 2017-05-18 19:30:11 UTC
</div>
</div>
</body>


@ -2236,6 +2236,13 @@ When an object is created, the system will populate this list with the current s
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -5554,6 +5561,10 @@ Examples:<br>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_preferredschedulingterm">v1.PreferredSchedulingTerm</h3>
@ -5776,7 +5787,7 @@ Examples:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:25:11 UTC
Last updated 2017-05-18 19:30:26 UTC
</div>
</div>
</body>


@ -3358,6 +3358,13 @@ When an object is created, the system will populate this list with the current s
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -7781,6 +7788,10 @@ Both these may change in the future. Incoming requests are matched against the h
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_preferredschedulingterm">v1.PreferredSchedulingTerm</h3>
@ -8085,7 +8096,7 @@ Both these may change in the future. Incoming requests are matched against the h
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:25:20 UTC
Last updated 2017-05-18 19:30:55 UTC
</div>
</div>
</body>


@ -3093,6 +3093,13 @@ When an object is created, the system will populate this list with the current s
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -3616,6 +3623,10 @@ When an object is created, the system will populate this list with the current s
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_downwardapivolumefile">v1.DownwardAPIVolumeFile</h3>
@ -3918,7 +3929,7 @@ Examples:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:25:40 UTC
Last updated 2017-05-18 19:31:55 UTC
</div>
</div>
</body>


@ -3871,6 +3871,13 @@ The resulting set of endpoints can be viewed as:<br>
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -9554,6 +9561,10 @@ Examples:<br>
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_configmapkeyselector">v1.ConfigMapKeySelector</h3>
@ -10032,7 +10043,7 @@ Examples:<br>
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:24:28 UTC
Last updated 2017-05-18 19:28:13 UTC
</div>
</div>
</body>


@ -35,4 +35,5 @@ spec:
- 'downwardAPI'
- 'configMap'
- 'persistentVolumeClaim'
- 'projected'


@ -211,7 +211,7 @@ sio-small kubernetes.io/scaleio
### PVC for the StorageClass
The next step is to define/deploy a `PeristentVolumeClaim` that will use the StorageClass.
The next step is to define/deploy a `PersistentVolumeClaim` that will use the StorageClass.
File [sc-pvc.yaml](sc-pvc.yaml)


@ -10599,6 +10599,10 @@
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"type": "string"
},
"kind": {
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
"type": "string"
},
"readOnly": {
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
"type": "boolean"


@ -5809,6 +5809,10 @@
"readOnly": {
"type": "boolean",
"description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts."
},
"kind": {
"$ref": "v1.AzureDataDiskKind",
"description": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared"
}
}
},
@ -5816,6 +5820,10 @@
"id": "v1.AzureDataDiskCachingMode",
"properties": {}
},
"v1.AzureDataDiskKind": {
"id": "v1.AzureDataDiskKind",
"properties": {}
},
"v1.PhotonPersistentDiskVolumeSource": {
"id": "v1.PhotonPersistentDiskVolumeSource",
"description": "Represents a Photon Controller persistent disk resource.",


@ -49,6 +49,7 @@ go_library(
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/kubeapiserver/options:go_default_library",
"//pkg/kubeapiserver/server:go_default_library",
"//pkg/quota/install:go_default_library",
"//pkg/registry/autoscaling/horizontalpodautoscaler/storage:go_default_library",
"//pkg/registry/batch/job/storage:go_default_library",
"//pkg/registry/cachesize:go_default_library",


@ -49,6 +49,7 @@ import (
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
kubeserver "k8s.io/kubernetes/pkg/kubeapiserver/server"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/registry/cachesize"
"k8s.io/kubernetes/pkg/routes"
"k8s.io/kubernetes/pkg/version"
@ -192,7 +193,10 @@ func NonBlockingRun(s *options.ServerRunOptions, stopCh <-chan struct{}) error {
}
}
pluginInitializer := kubeapiserveradmission.NewPluginInitializer(client, sharedInformers, apiAuthorizer, cloudConfig, nil)
// NOTE: we do not provide informers to the quota registry because admission level decisions
// do not require us to open watches for all items tracked by quota.
quotaRegistry := quotainstall.NewRegistry(nil, nil)
pluginInitializer := kubeapiserveradmission.NewPluginInitializer(client, sharedInformers, apiAuthorizer, cloudConfig, nil, quotaRegistry)
err = s.Admission.ApplyTo(
genericConfig,


@ -16,6 +16,7 @@ go_library(
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/version:go_default_library",
"//pkg/version/prometheus:go_default_library",
],
)


@ -17,19 +17,24 @@ limitations under the License.
package app
import (
"fmt"
"os"
"k8s.io/kubernetes/federation/pkg/kubefed"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/logs"
"k8s.io/kubernetes/pkg/version"
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
const hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
func Run() error {
logs.InitLogs()
defer logs.FlushLogs()
cmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr)
defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())
cmd := kubefed.NewKubeFedCommand(cmdutil.NewFactory(nil), os.Stdin, os.Stdout, os.Stderr, defaultImage)
return cmd.Execute()
}
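The default image computed above renders as hyperkubeImageName plus the build's version string. A minimal sketch with a hard-coded stand-in for version.Get() (the version value here is illustrative only):

package main

import "fmt"

func main() {
	const hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
	// Stand-in for version.Get(), which renders as the build's git version.
	exampleVersion := "vX.Y.Z"
	fmt.Printf("%s:%s\n", hyperkubeImageName, exampleVersion)
}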


@ -3032,6 +3032,13 @@ When an object is created, the system will populate this list with the current s
<td class="tableblock halign-left valign-top"><p class="tableblock">boolean</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
</tr>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">kind</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">false</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock"><a href="#_v1_azuredatadiskkind">v1.AzureDataDiskKind</a></p></td>
<td class="tableblock halign-left valign-top"></td>
</tr>
</tbody>
</table>
@ -6957,6 +6964,10 @@ Both these may change in the future. Incoming requests are matched against the h
</tbody>
</table>
</div>
<div class="sect2">
<h3 id="_v1_azuredatadiskkind">v1.AzureDataDiskKind</h3>
</div>
<div class="sect2">
<h3 id="_v1_preferredschedulingterm">v1.PreferredSchedulingTerm</h3>
@ -7165,7 +7176,7 @@ Both these may change in the future. Incoming requests are matched against the h
</div>
<div id="footer">
<div id="footer-text">
Last updated 2017-05-17 22:26:29 UTC
Last updated 2017-05-18 19:40:23 UTC
</div>
</div>
</body>


@ -74,7 +74,7 @@ func (c *FederatedTypeCRUDTester) CheckLifecycle(desiredObject pkgruntime.Object
c.CheckDelete(obj, &orphanDependents)
}
func (c *FederatedTypeCRUDTester) CheckCreate(desiredObject pkgruntime.Object) pkgruntime.Object {
func (c *FederatedTypeCRUDTester) Create(desiredObject pkgruntime.Object) pkgruntime.Object {
namespace := c.adapter.ObjectMeta(desiredObject).Namespace
c.tl.Logf("Creating new federated %s in namespace %q", c.kind, namespace)
@ -86,6 +86,12 @@ func (c *FederatedTypeCRUDTester) CheckCreate(desiredObject pkgruntime.Object) p
namespacedName := c.adapter.NamespacedName(obj)
c.tl.Logf("Created new federated %s %q", c.kind, namespacedName)
return obj
}
func (c *FederatedTypeCRUDTester) CheckCreate(desiredObject pkgruntime.Object) pkgruntime.Object {
obj := c.Create(desiredObject)
c.CheckPropagation(obj)
return obj
@ -165,18 +171,25 @@ func (c *FederatedTypeCRUDTester) CheckDelete(obj pkgruntime.Object, orphanDepen
// CheckPropagation checks propagation for the crud tester's clients
func (c *FederatedTypeCRUDTester) CheckPropagation(obj pkgruntime.Object) {
c.CheckPropagationForClients(obj, c.clusterClients)
c.CheckPropagationForClients(obj, c.clusterClients, true)
}
// CheckPropagationForClients checks propagation for the provided clients
func (c *FederatedTypeCRUDTester) CheckPropagationForClients(obj pkgruntime.Object, clusterClients []clientset.Interface) {
func (c *FederatedTypeCRUDTester) CheckPropagationForClients(obj pkgruntime.Object, clusterClients []clientset.Interface, objExpected bool) {
namespacedName := c.adapter.NamespacedName(obj)
c.tl.Logf("Waiting for %s %q in %d clusters", c.kind, namespacedName, len(clusterClients))
for _, client := range clusterClients {
err := c.waitForResource(client, obj)
if err != nil {
switch {
case err == wait.ErrWaitTimeout:
if objExpected {
c.tl.Fatalf("Timeout verifying %s %q in a member cluster: %v", c.kind, namespacedName, err)
}
case err != nil:
c.tl.Fatalf("Failed to verify %s %q in a member cluster: %v", c.kind, namespacedName, err)
case err == nil && !objExpected:
c.tl.Fatalf("Found unexpected object %s %q in a member cluster: %v", c.kind, namespacedName, err)
}
}
}


@ -24,7 +24,6 @@ go_library(
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/kubectl/cmd/templates:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",


@ -51,7 +51,6 @@ import (
client "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/version"
"github.com/golang/glog"
"github.com/spf13/cobra"
@ -126,8 +125,6 @@ var (
"app": "federated-cluster",
"module": "federation-controller-manager",
}
hyperkubeImageName = "gcr.io/google_containers/hyperkube-amd64"
)
type initFederation struct {
@ -154,9 +151,7 @@ type initFederationOptions struct {
apiServerEnableTokenAuth bool
}
func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {
defaultImage := fmt.Sprintf("%s:%s", hyperkubeImageName, version.Get())
func (o *initFederationOptions) Bind(flags *pflag.FlagSet, defaultImage string) {
flags.StringVar(&o.dnsZoneName, "dns-zone-name", "", "DNS suffix for this federation. Federated Service DNS names are published with this suffix.")
flags.StringVar(&o.image, "image", defaultImage, "Image to use for federation API server and controller manager binaries.")
flags.StringVar(&o.dnsProvider, "dns-provider", "", "Dns provider to be used for this deployment.")
@ -174,7 +169,7 @@ func (o *initFederationOptions) Bind(flags *pflag.FlagSet) {
// NewCmdInit defines the `init` command that bootstraps a federation
// control plane inside a set of host clusters.
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
func NewCmdInit(cmdOut io.Writer, config util.AdminConfig, defaultImage string) *cobra.Command {
opts := &initFederation{}
cmd := &cobra.Command{
@ -190,7 +185,7 @@ func NewCmdInit(cmdOut io.Writer, config util.AdminConfig) *cobra.Command {
flags := cmd.Flags()
opts.commonOptions.Bind(flags)
opts.options.Bind(flags)
opts.options.Bind(flags, defaultImage)
return cmd
}


@ -244,7 +244,7 @@ func TestInitFederation(t *testing.T) {
t.Fatalf("[%d] unexpected error: %v", i, err)
}
cmd := NewCmdInit(buf, adminConfig)
cmd := NewCmdInit(buf, adminConfig, "image")
cmd.Flags().Set("kubeconfig", tc.kubeconfigExplicit)
cmd.Flags().Set("host-cluster-context", "substrate")


@ -31,7 +31,7 @@ import (
)
// NewKubeFedCommand creates the `kubefed` command and its nested children.
func NewKubeFedCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cobra.Command {
func NewKubeFedCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer, defaultImage string) *cobra.Command {
// Parent command to which all subcommands are added.
cmds := &cobra.Command{
Use: "kubefed",
@ -53,7 +53,7 @@ func NewKubeFedCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob
{
Message: "Basic Commands:",
Commands: []*cobra.Command{
kubefedinit.NewCmdInit(out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
kubefedinit.NewCmdInit(out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions()), defaultImage),
NewCmdJoin(f, out, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
NewCmdUnjoin(f, out, err, util.NewAdminConfig(clientcmd.NewDefaultPathOptions())),
},


@ -1,4 +1,5 @@
reviewers:
- cblecker
- eparis
- fejta
- ixdy


@ -79,6 +79,7 @@ KUBE_NONSERVER_GROUP_VERSIONS="
abac.authorization.kubernetes.io/v1beta1 \
componentconfig/v1alpha1 \
imagepolicy.k8s.io/v1alpha1\
admission.k8s.io/v1alpha1\
"
# This emulates "readlink -f" which is not available on MacOS X.


@ -3789,6 +3789,30 @@ __EOF__
output_message=$(! KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins/ kubectl plugin error 2>&1)
kube::test::if_has_string "${output_message}" 'error: exit status 1'
# plugin tree
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree 2>&1)
kube::test::if_has_string "${output_message}" 'Plugin with a tree of commands'
kube::test::if_has_string "${output_message}" 'child1\s\+The first child of a tree'
kube::test::if_has_string "${output_message}" 'child2\s\+The second child of a tree'
kube::test::if_has_string "${output_message}" 'child3\s\+The third child of a tree'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 --help 2>&1)
kube::test::if_has_string "${output_message}" 'The first child of a tree'
kube::test::if_has_not_string "${output_message}" 'The second child'
kube::test::if_has_not_string "${output_message}" 'child2'
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin tree child1 2>&1)
kube::test::if_has_string "${output_message}" 'child one'
kube::test::if_has_not_string "${output_message}" 'child1'
kube::test::if_has_not_string "${output_message}" 'The first child'
# plugin env
output_message=$(KUBECTL_PLUGINS_PATH=test/fixtures/pkg/kubectl/plugins kubectl plugin env 2>&1)
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CURRENT_NAMESPACE'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_CALLER'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_COMMAND=./env.sh'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_DESCRIPTOR_SHORT_DESC=The plugin envs plugin'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG'
kube::test::if_has_string "${output_message}" 'KUBECTL_PLUGINS_GLOBAL_FLAG_REQUEST_TIMEOUT=0'
#################
# Impersonation #
#################


@ -153,7 +153,6 @@ else
# Runtime flags
test_args='--kubelet-flags="--container-runtime='$runtime'" '$test_args
if [[ $runtime == "remote" ]] ; then
test_args='--kubelet-flags="--experimental-cri=true" '$test_args
if [[ ! -z $container_runtime_endpoint ]] ; then
test_args='--kubelet-flags="--container-runtime-endpoint='$container_runtime_endpoint'" '$test_args
fi


@ -63,7 +63,7 @@ kube::log::status "Starting federation-apiserver"
--cert-dir="${TMP_DIR}/certs" \
--token-auth-file=$TMP_DIR/tokenauth.csv >/tmp/openapi-federation-api-server.log 2>&1 &
APISERVER_PID=$!
kube::util::wait_for_url "${API_HOST}:${API_PORT}/" "apiserver: "
kube::util::wait_for_url "${API_HOST}:${API_PORT}/healthz" "apiserver: "
kube::log::status "Updating " ${OPENAPI_ROOT_DIR}


@ -69,6 +69,7 @@ groups_without_codegen=(
"abac"
"componentconfig"
"imagepolicy"
"admission"
)
client_gen_file="${KUBE_ROOT}/cmd/libs/go2idl/client-gen/main.go"
@ -92,6 +93,7 @@ done
# them. This happens for types that aren't served from the API server
packages_without_install=(
"k8s.io/kubernetes/pkg/apis/abac"
"k8s.io/kubernetes/pkg/apis/admission"
)
known_version_files=(
"pkg/master/import_known_versions.go"
@ -116,7 +118,7 @@ for expected_install_package in "${expected_install_packages[@]}"; do
done
done
# check all groupversions to make sure they're in the init.sh file. This isn't perfect, but its slightly
# check all groupversions to make sure they're in the init.sh file. This isn't perfect, but its slightly
# better than nothing
for external_group_version in "${external_group_versions[@]}"; do
if ! grep -q "${external_group_version}" "${KUBE_ROOT}/hack/lib/init.sh" ; then


@ -26,7 +26,7 @@ source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::verify_go_version
cd "${KUBE_ROOT}"
if git --no-pager grep -E $'^(import |\t)[a-z]+[A-Z_][a-zA-Z]* "[^"]+"$' -- '**/*.go' ':(exclude)vendor/*' ':(exclude)staging/src/k8s.io/client-go/*vendor/*' ':(exclude)staging/src/k8s.io/metrics/*'; then
if git --no-pager grep -E $'^(import |\t)[a-z]+[A-Z_][a-zA-Z]* "[^"]+"$' -- '**/*.go' ':(exclude)vendor/*' ':(exclude)staging/src/k8s.io/client-go/*vendor/*' ':(exclude)staging/src/k8s.io/metrics/*' ':(exclude)pkg/apis/admission/v1alpha1/zz_generated.conversion.go'; then
echo "!!! Some package aliases break go conventions."
echo "To fix these errors, do not use capitalized or underlined characters"
echo "in pkg aliases. Refer to https://blog.golang.org/package-names for more info."


@ -16,6 +16,7 @@ filegroup(
"//pkg/api:all-srcs",
"//pkg/apimachinery/tests:all-srcs",
"//pkg/apis/abac:all-srcs",
"//pkg/apis/admission:all-srcs",
"//pkg/apis/apps:all-srcs",
"//pkg/apis/authentication:all-srcs",
"//pkg/apis/authorization:all-srcs",


@ -427,7 +427,11 @@ func coreFuncs(t apitesting.TestingCommon) []interface{} {
func(obj *api.AzureDiskVolumeSource, c fuzz.Continue) {
if obj.CachingMode == nil {
obj.CachingMode = new(api.AzureDataDiskCachingMode)
*obj.CachingMode = api.AzureDataDiskCachingNone
*obj.CachingMode = api.AzureDataDiskCachingReadWrite
}
if obj.Kind == nil {
obj.Kind = new(api.AzureDataDiskKind)
*obj.Kind = api.AzureSharedBlobDisk
}
if obj.FSType == nil {
obj.FSType = new(string)


@ -1059,11 +1059,16 @@ type PortworxVolumeSource struct {
}
type AzureDataDiskCachingMode string
type AzureDataDiskKind string
const (
AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
AzureSharedBlobDisk AzureDataDiskKind = "Shared"
AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
@ -1084,6 +1089,8 @@ type AzureDiskVolumeSource struct {
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool
// Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
Kind *AzureDataDiskKind
}
// ScaleIOVolumeSource represents a persistent ScaleIO volume
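A minimal sketch of how the new Kind field might be populated, using the constants and field names added above (illustrative only; the disk name and URI are placeholders):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

func main() {
	kind := api.AzureManagedDisk
	readOnly := false
	// Request an Azure managed data disk; managed disks use an ARM resource path
	// as the DataDiskURI (see the validation change later in this diff).
	src := api.AzureDiskVolumeSource{
		DiskName:    "example-disk",
		DataDiskURI: "/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}",
		Kind:        &kind,
		ReadOnly:    &readOnly,
	}
	fmt.Printf("%+v\n", src)
}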


@ -239,7 +239,11 @@ func SetDefaults_ISCSIVolumeSource(obj *ISCSIVolumeSource) {
func SetDefaults_AzureDiskVolumeSource(obj *AzureDiskVolumeSource) {
if obj.CachingMode == nil {
obj.CachingMode = new(AzureDataDiskCachingMode)
*obj.CachingMode = AzureDataDiskCachingNone
*obj.CachingMode = AzureDataDiskCachingReadWrite
}
if obj.Kind == nil {
obj.Kind = new(AzureDataDiskKind)
*obj.Kind = AzureSharedBlobDisk
}
if obj.FSType == nil {
obj.FSType = new(string)

File diff suppressed because it is too large


@ -120,6 +120,9 @@ message AzureDiskVolumeSource {
// the ReadOnly setting in VolumeMounts.
// +optional
optional bool readOnly = 5;
// Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
optional string kind = 6;
}
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.


@ -17882,6 +17882,32 @@ func (x *AzureDataDiskCachingMode) CodecDecodeSelf(d *codec1978.Decoder) {
}
}
func (x AzureDataDiskKind) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
_, _, _ = h, z, r
yym1 := z.EncBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.EncExt(x) {
} else {
r.EncodeString(codecSelferC_UTF81234, string(x))
}
}
func (x *AzureDataDiskKind) CodecDecodeSelf(d *codec1978.Decoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
yym1 := z.DecBinary()
_ = yym1
if false {
} else if z.HasExtensions() && z.DecExt(x) {
} else {
*((*string)(x)) = r.DecodeString()
}
}
func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
var h codecSelfer1234
z, r := codec1978.GenHelperEncoder(e)
@ -17896,15 +17922,16 @@ func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
} else {
yysep2 := !z.EncBinary()
yy2arr2 := z.EncBasicHandle().StructToArray
var yyq2 [5]bool
var yyq2 [6]bool
_, _, _ = yysep2, yyq2, yy2arr2
const yyr2 bool = false
yyq2[2] = x.CachingMode != nil
yyq2[3] = x.FSType != nil
yyq2[4] = x.ReadOnly != nil
yyq2[5] = x.Kind != nil
var yynn2 int
if yyr2 || yy2arr2 {
r.EncodeArrayStart(5)
r.EncodeArrayStart(6)
} else {
yynn2 = 2
for _, b := range yyq2 {
@ -18048,6 +18075,31 @@ func (x *AzureDiskVolumeSource) CodecEncodeSelf(e *codec1978.Encoder) {
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayElem1234)
if yyq2[5] {
if x.Kind == nil {
r.EncodeNil()
} else {
yy25 := *x.Kind
yy25.CodecEncodeSelf(e)
}
} else {
r.EncodeNil()
}
} else {
if yyq2[5] {
z.EncSendContainerState(codecSelfer_containerMapKey1234)
r.EncodeString(codecSelferC_UTF81234, string("kind"))
z.EncSendContainerState(codecSelfer_containerMapValue1234)
if x.Kind == nil {
r.EncodeNil()
} else {
yy27 := *x.Kind
yy27.CodecEncodeSelf(e)
}
}
}
if yyr2 || yy2arr2 {
z.EncSendContainerState(codecSelfer_containerArrayEnd1234)
} else {
@ -18176,6 +18228,17 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromMap(l int, d *codec1978.Decod
*((*bool)(x.ReadOnly)) = r.DecodeBool()
}
}
case "kind":
if r.TryDecodeAsNil() {
if x.Kind != nil {
x.Kind = nil
}
} else {
if x.Kind == nil {
x.Kind = new(AzureDataDiskKind)
}
x.Kind.CodecDecodeSelf(d)
}
default:
z.DecStructFieldNotFound(-1, yys3)
} // end switch yys3
@ -18187,16 +18250,16 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec
var h codecSelfer1234
z, r := codec1978.GenHelperDecoder(d)
_, _, _ = h, z, r
var yyj13 int
var yyb13 bool
var yyhl13 bool = l >= 0
yyj13++
if yyhl13 {
yyb13 = yyj13 > l
var yyj14 int
var yyb14 bool
var yyhl14 bool = l >= 0
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb13 = r.CheckBreak()
yyb14 = r.CheckBreak()
}
if yyb13 {
if yyb14 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -18204,21 +18267,21 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec
if r.TryDecodeAsNil() {
x.DiskName = ""
} else {
yyv14 := &x.DiskName
yym15 := z.DecBinary()
_ = yym15
yyv15 := &x.DiskName
yym16 := z.DecBinary()
_ = yym16
if false {
} else {
*((*string)(yyv14)) = r.DecodeString()
*((*string)(yyv15)) = r.DecodeString()
}
}
yyj13++
if yyhl13 {
yyb13 = yyj13 > l
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb13 = r.CheckBreak()
yyb14 = r.CheckBreak()
}
if yyb13 {
if yyb14 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -18226,21 +18289,21 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec
if r.TryDecodeAsNil() {
x.DataDiskURI = ""
} else {
yyv16 := &x.DataDiskURI
yym17 := z.DecBinary()
_ = yym17
yyv17 := &x.DataDiskURI
yym18 := z.DecBinary()
_ = yym18
if false {
} else {
*((*string)(yyv16)) = r.DecodeString()
*((*string)(yyv17)) = r.DecodeString()
}
}
yyj13++
if yyhl13 {
yyb13 = yyj13 > l
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb13 = r.CheckBreak()
yyb14 = r.CheckBreak()
}
if yyb13 {
if yyb14 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -18255,13 +18318,13 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec
}
x.CachingMode.CodecDecodeSelf(d)
}
yyj13++
if yyhl13 {
yyb13 = yyj13 > l
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb13 = r.CheckBreak()
yyb14 = r.CheckBreak()
}
if yyb13 {
if yyb14 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -18274,20 +18337,20 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec
if x.FSType == nil {
x.FSType = new(string)
}
yym20 := z.DecBinary()
_ = yym20
yym21 := z.DecBinary()
_ = yym21
if false {
} else {
*((*string)(x.FSType)) = r.DecodeString()
}
}
yyj13++
if yyhl13 {
yyb13 = yyj13 > l
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb13 = r.CheckBreak()
yyb14 = r.CheckBreak()
}
if yyb13 {
if yyb14 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
@ -18300,25 +18363,46 @@ func (x *AzureDiskVolumeSource) codecDecodeSelfFromArray(l int, d *codec1978.Dec
if x.ReadOnly == nil {
x.ReadOnly = new(bool)
}
yym22 := z.DecBinary()
_ = yym22
yym23 := z.DecBinary()
_ = yym23
if false {
} else {
*((*bool)(x.ReadOnly)) = r.DecodeBool()
}
}
for {
yyj13++
if yyhl13 {
yyb13 = yyj13 > l
} else {
yyb13 = r.CheckBreak()
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb14 = r.CheckBreak()
}
if yyb14 {
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
return
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
if r.TryDecodeAsNil() {
if x.Kind != nil {
x.Kind = nil
}
if yyb13 {
} else {
if x.Kind == nil {
x.Kind = new(AzureDataDiskKind)
}
x.Kind.CodecDecodeSelf(d)
}
for {
yyj14++
if yyhl14 {
yyb14 = yyj14 > l
} else {
yyb14 = r.CheckBreak()
}
if yyb14 {
break
}
z.DecSendContainerState(codecSelfer_containerArrayElem1234)
z.DecStructFieldNotFound(yyj13-1, "")
z.DecStructFieldNotFound(yyj14-1, "")
}
z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
}


@ -1124,11 +1124,16 @@ type PhotonPersistentDiskVolumeSource struct {
}
type AzureDataDiskCachingMode string
type AzureDataDiskKind string
const (
AzureDataDiskCachingNone AzureDataDiskCachingMode = "None"
AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly"
AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite"
AzureSharedBlobDisk AzureDataDiskKind = "Shared"
AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated"
AzureManagedDisk AzureDataDiskKind = "Managed"
)
// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
@ -1149,6 +1154,8 @@ type AzureDiskVolumeSource struct {
// the ReadOnly setting in VolumeMounts.
// +optional
ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"`
// Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared
Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"`
}
// PortworxVolumeSource represents a Portworx volume resource.


@ -76,6 +76,7 @@ var map_AzureDiskVolumeSource = map[string]string{
"cachingMode": "Host Caching mode: None, Read Only, Read Write.",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
"readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
"kind": "Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared",
}
func (AzureDiskVolumeSource) SwaggerDoc() map[string]string {


@ -474,6 +474,7 @@ func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *Azure
out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
out.FSType = (*string)(unsafe.Pointer(in.FSType))
out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
out.Kind = (*api.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
return nil
}
@ -488,6 +489,7 @@ func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.A
out.CachingMode = (*AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
out.FSType = (*string)(unsafe.Pointer(in.FSType))
out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
out.Kind = (*AzureDataDiskKind)(unsafe.Pointer(in.Kind))
return nil
}


@ -298,6 +298,11 @@ func DeepCopy_v1_AzureDiskVolumeSource(in interface{}, out interface{}, c *conve
*out = new(bool)
**out = **in
}
if in.Kind != nil {
in, out := &in.Kind, &out.Kind
*out = new(AzureDataDiskKind)
**out = **in
}
return nil
}
}


@ -542,10 +542,16 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.E
allErrs = append(allErrs, validateConfigMapVolumeSource(source.ConfigMap, fldPath.Child("configMap"))...)
}
}
if source.AzureFile != nil {
numVolumes++
allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureFile"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureFile(source.AzureFile, fldPath.Child("azureFile"))...)
}
}
if source.VsphereVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
@ -571,8 +577,12 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path) field.E
}
}
if source.AzureDisk != nil {
numVolumes++
allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...)
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(fldPath.Child("azureDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureDisk(source.AzureDisk, fldPath.Child("azureDisk"))...)
}
}
if source.Projected != nil {
if numVolumes > 0 {
@ -1017,19 +1027,39 @@ func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) fi
return allErrs
}
var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite))
func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList {
var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite))
var supportedDiskKinds = sets.NewString(string(api.AzureSharedBlobDisk), string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk))
diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"}
diskUriSupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"}
allErrs := field.ErrorList{}
if azure.DiskName == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("diskName"), ""))
}
if azure.DataDiskURI == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("diskURI"), ""))
}
if azure.CachingMode != nil && !supportedCachingModes.Has(string(*azure.CachingMode)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("cachingMode"), *azure.CachingMode, supportedCachingModes.List()))
}
if azure.Kind != nil && !supportedDiskKinds.Has(string(*azure.Kind)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("kind"), *azure.Kind, supportedDiskKinds.List()))
}
// validate that DiskUri is the correct format
if azure.Kind != nil && *azure.Kind == api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged))
}
if azure.Kind != nil && *azure.Kind != api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob))
}
return allErrs
}
@ -1211,9 +1241,15 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
allErrs = append(allErrs, validateFlexVolumeSource(pv.Spec.FlexVolume, specPath.Child("flexVolume"))...)
}
if pv.Spec.AzureFile != nil {
numVolumes++
allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...)
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(specPath.Child("azureFile"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureFile(pv.Spec.AzureFile, specPath.Child("azureFile"))...)
}
}
if pv.Spec.VsphereVolume != nil {
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(specPath.Child("vsphereVolume"), "may not specify more than 1 volume type"))
@ -1239,8 +1275,12 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
}
}
if pv.Spec.AzureDisk != nil {
numVolumes++
allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...)
if numVolumes > 0 {
allErrs = append(allErrs, field.Forbidden(specPath.Child("azureDisk"), "may not specify more than 1 volume type"))
} else {
numVolumes++
allErrs = append(allErrs, validateAzureDisk(pv.Spec.AzureDisk, specPath.Child("azureDisk"))...)
}
}
if pv.Spec.ScaleIO != nil {
if numVolumes > 0 {
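As a quick illustration of the new diskURI format checks, here is a sketch that mirrors the prefix tests added above (not code taken from this change):

package main

import (
	"fmt"
	"strings"
)

// validDiskURI mirrors the checks added to validateAzureDisk: a Managed disk
// must use an ARM resource path, while blob-backed kinds must use an https URL.
func validDiskURI(kind, uri string) bool {
	if kind == "Managed" {
		return strings.HasPrefix(uri, "/subscriptions/")
	}
	return strings.HasPrefix(uri, "https://")
}

func main() {
	fmt.Println(validDiskURI("Managed", "/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}")) // true
	fmt.Println(validDiskURI("Shared", "https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"))                    // true
}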


@ -300,6 +300,11 @@ func DeepCopy_api_AzureDiskVolumeSource(in interface{}, out interface{}, c *conv
*out = new(bool)
**out = **in
}
if in.Kind != nil {
in, out := &in.Kind, &out.Kind
*out = new(AzureDataDiskKind)
**out = **in
}
return nil
}
}

pkg/apis/admission/BUILD (new file)

@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.generated.go",
"types.go",
"zz_generated.deepcopy.go",
],
tags = ["automanaged"],
deps = [
"//pkg/apis/authentication:go_default_library",
"//vendor/github.com/ugorji/go/codec:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/apis/admission/v1alpha1:all-srcs",
],
tags = ["automanaged"],
)

pkg/apis/admission/doc.go (new file)

@ -0,0 +1,19 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +groupName=admission.k8s.io
package admission // import "k8s.io/kubernetes/pkg/apis/admission"


@ -0,0 +1,52 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name use in this package
const GroupName = "admission.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// SchemeBuilder the schema builder
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme handler to add items to the schema
AddToScheme = SchemeBuilder.AddToScheme
)
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&AdmissionReview{},
)
return nil
}

File diff suppressed because it is too large


@ -0,0 +1,73 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package admission
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/apis/authentication"
)
// AdmissionReview describes an admission request.
type AdmissionReview struct {
metav1.TypeMeta
// Spec describes the attributes for the admission request.
// Since this admission controller is non-mutating the webhook should avoid setting this in its response to avoid the
// cost of deserializing it.
Spec AdmissionReviewSpec
// Status is filled in by the webhook and indicates whether the admission request should be permitted.
Status AdmissionReviewStatus
}
// AdmissionReviewSpec describes the admission.Attributes for the admission request.
type AdmissionReviewSpec struct {
// Kind is the type of object being manipulated. For example: Pod
Kind metav1.GroupVersionKind
// Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will be the empty string.
Name string
// Namespace is the namespace associated with the request (if any).
Namespace string
// Object is the object from the incoming request prior to default values being applied
Object runtime.Object
// OldObject is the existing object. Only populated for UPDATE requests.
OldObject runtime.Object
// Operation is the operation being performed
Operation admission.Operation
// Resource is the name of the resource being requested. This is not the kind. For example: pods
Resource metav1.GroupVersionResource
// SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent
// resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while
// /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on
// pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource
// "binding", and kind "Binding".
SubResource string
// UserInfo is information about the requesting user
UserInfo authentication.UserInfo
}
// AdmissionReviewStatus describes the status of the admission request.
type AdmissionReviewStatus struct {
// Allowed indicates whether or not the admission request was permitted.
Allowed bool
// Result contains extra details about why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
Result *metav1.Status
}
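
To make the Allowed/Result contract above concrete, the following hedged sketch shows how a caller might map an AdmissionReviewStatus onto a Go error; the statusToError helper is hypothetical and not part of this change.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/admission"
)

// statusToError is a hypothetical helper: Result is only consulted when the
// request was denied, mirroring the field documentation above.
func statusToError(status admission.AdmissionReviewStatus) error {
	if status.Allowed {
		return nil // Result is ignored entirely when Allowed is true
	}
	if status.Result != nil && status.Result.Message != "" {
		return fmt.Errorf("admission denied: %s", status.Result.Message)
	}
	return fmt.Errorf("admission denied")
}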

View File

@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"generated.pb.go",
"helpers.go",
"register.go",
"types.generated.go",
"types.go",
"zz_generated.conversion.go",
"zz_generated.deepcopy.go",
"zz_generated.defaults.go",
],
tags = ["automanaged"],
deps = [
"//pkg/apis/admission:go_default_library",
"//pkg/apis/authentication/v1:go_default_library",
"//vendor/github.com/gogo/protobuf/proto:go_default_library",
"//vendor/github.com/ugorji/go/codec:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/conversion:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/admission
// +k8s:openapi-gen=false
// +k8s:defaulter-gen=TypeMeta
// +groupName=admission.k8s.io
package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/admission/v1alpha1"

File diff suppressed because it is too large

View File

@ -0,0 +1,96 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.kubernetes.pkg.apis.admission.v1alpha1;
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
import "k8s.io/apiserver/pkg/apis/example/v1/generated.proto";
import "k8s.io/kubernetes/pkg/apis/authentication/v1/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1alpha1";
// AdmissionReview describes an admission request.
message AdmissionReview {
// Spec describes the attributes for the admission request.
// Since this admission controller is non-mutating, the webhook should avoid setting this in its response to avoid the
// cost of deserializing it.
// +optional
optional AdmissionReviewSpec spec = 1;
// Status is filled in by the webhook and indicates whether the admission request should be permitted.
// +optional
optional AdmissionReviewStatus status = 2;
}
// AdmissionReviewSpec describes the admission.Attributes for the admission request.
message AdmissionReviewSpec {
// Kind is the type of object being manipulated. For example: Pod
optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind kind = 1;
// Object is the object from the incoming request prior to default values being applied
optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2;
// OldObject is the existing object. Only populated for UPDATE requests.
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension oldObject = 3;
// Operation is the operation being performed
optional string operation = 4;
// Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will be the empty string.
// +optional
optional string name = 5;
// Namespace is the namespace associated with the request (if any).
// +optional
optional string namespace = 6;
// Resource is the name of the resource being requested. This is not the kind. For example: pods
optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource resource = 7;
// SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent
// resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while
// /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on
// pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource
// "binding", and kind "Binding".
// +optional
optional string subResource = 8;
// UserInfo is information about the requesting user
optional k8s.io.kubernetes.pkg.apis.authentication.v1.UserInfo userInfo = 9;
}
// AdmissionReviewStatus describes the status of the admission request.
message AdmissionReviewStatus {
// Allowed indicates whether or not the admission request was permitted.
optional bool allowed = 1;
// Result contains extra details about why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 2;
}

View File

@ -0,0 +1,68 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
)
// NewAdmissionReview returns an AdmissionReview for the provided admission.Attributes
func NewAdmissionReview(attr admission.Attributes) AdmissionReview {
gvk := attr.GetKind()
gvr := attr.GetResource()
aUserInfo := attr.GetUserInfo()
userInfo := authenticationv1.UserInfo{
Extra: make(map[string]authenticationv1.ExtraValue),
Groups: aUserInfo.GetGroups(),
UID: aUserInfo.GetUID(),
Username: aUserInfo.GetName(),
}
// Convert the extra information in the user object
for key, val := range aUserInfo.GetExtra() {
userInfo.Extra[key] = authenticationv1.ExtraValue(val)
}
return AdmissionReview{
Spec: AdmissionReviewSpec{
Name: attr.GetName(),
Namespace: attr.GetNamespace(),
Resource: metav1.GroupVersionResource{
Group: gvr.Group,
Resource: gvr.Resource,
Version: gvr.Version,
},
SubResource: attr.GetSubresource(),
Operation: attr.GetOperation(),
Object: runtime.RawExtension{
Object: attr.GetObject(),
},
OldObject: runtime.RawExtension{
Object: attr.GetOldObject(),
},
Kind: metav1.GroupVersionKind{
Group: gvk.Group,
Kind: gvk.Kind,
Version: gvk.Version,
},
UserInfo: userInfo,
},
}
}
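
As a hedged sketch of how NewAdmissionReview is meant to be used, a webhook-backed admission plugin could build the review from the incoming attributes, send it to the external webhook, and map a denial to a forbidden error. The webhookPlugin type and its reviewer client below are hypothetical; only NewAdmissionReview and the apiserver admission package come from this change and its dependencies.

package webhook

import (
	"fmt"

	"k8s.io/apiserver/pkg/admission"
	admissionv1alpha1 "k8s.io/kubernetes/pkg/apis/admission/v1alpha1"
)

// reviewer is a hypothetical interface for whatever transports the review to
// the external webhook; the webhook fills in review.Status before returning.
type reviewer interface {
	Review(review *admissionv1alpha1.AdmissionReview) error
}

// webhookPlugin is a hypothetical wrapper used only for this sketch.
type webhookPlugin struct {
	client reviewer
}

// Admit builds an AdmissionReview from the request attributes, asks the
// webhook, and rejects the request if the webhook did not allow it.
func (p *webhookPlugin) Admit(attr admission.Attributes) error {
	review := admissionv1alpha1.NewAdmissionReview(attr)
	if err := p.client.Review(&review); err != nil {
		return err
	}
	if !review.Status.Allowed {
		return admission.NewForbidden(attr, fmt.Errorf("rejected by external admission webhook"))
	}
	return nil
}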

View File

@ -0,0 +1,58 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name for this API.
const GroupName = "admission.k8s.io"
// SchemeGroupVersion is the group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)
func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes, RegisterDefaults)
}
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&AdmissionReview{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

File diff suppressed because it is too large

View File

@ -0,0 +1,78 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
authenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
)
// AdmissionReview describes an admission request.
type AdmissionReview struct {
metav1.TypeMeta `json:",inline"`
// Spec describes the attributes for the admission request.
// Since this admission controller is non-mutating, the webhook should avoid setting this in its response to avoid the
// cost of deserializing it.
// +optional
Spec AdmissionReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"`
// Status is filled in by the webhook and indicates whether the admission request should be permitted.
// +optional
Status AdmissionReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
// AdmissionReviewSpec describes the admission.Attributes for the admission request.
type AdmissionReviewSpec struct {
// Kind is the type of object being manipulated. For example: Pod
Kind metav1.GroupVersionKind `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"`
// Object is the object from the incoming request prior to default values being applied
Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"`
// OldObject is the existing object. Only populated for UPDATE requests.
// +optional
OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,3,opt,name=oldObject"`
// Operation is the operation being performed
Operation admission.Operation `json:"operation,omitempty" protobuf:"bytes,4,opt,name=operation"`
// Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
// rely on the server to generate the name. If that is the case, this field will be the empty string.
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
// Namespace is the namespace associated with the request (if any).
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
// Resource is the name of the resource being requested. This is not the kind. For example: pods
Resource metav1.GroupVersionResource `json:"resource,omitempty" protobuf:"bytes,7,opt,name=resource"`
// SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent
// resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while
// /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on
// pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource
// "binding", and kind "Binding".
// +optional
SubResource string `json:"subResource,omitempty" protobuf:"bytes,8,opt,name=subResource"`
// UserInfo is information about the requesting user
UserInfo authenticationv1.UserInfo `json:"userInfo,omitempty" protobuf:"bytes,9,opt,name=userInfo"`
}
// AdmissionReviewStatus describes the status of the admission request.
type AdmissionReviewStatus struct {
// Allowed indicates whether or not the admission request was permitted.
Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"`
// Result contains extra details about why an admission request was denied.
// This field IS NOT consulted in any way if "Allowed" is "true".
// +optional
Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"`
}
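
Since the struct tags above define the wire format, a small hedged sketch of serializing a sparsely populated AdmissionReview can help visualize what a webhook receives; all field values below are illustrative placeholders and not part of this change.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionv1alpha1 "k8s.io/kubernetes/pkg/apis/admission/v1alpha1"
)

func main() {
	review := admissionv1alpha1.AdmissionReview{
		TypeMeta: metav1.TypeMeta{Kind: "AdmissionReview", APIVersion: "admission.k8s.io/v1alpha1"},
		Spec: admissionv1alpha1.AdmissionReviewSpec{
			Kind:      metav1.GroupVersionKind{Version: "v1", Kind: "Pod"},
			Resource:  metav1.GroupVersionResource{Version: "v1", Resource: "pods"},
			Namespace: "default",     // illustrative
			Name:      "example-pod", // illustrative
		},
	}
	// Marshal using the json tags declared in types.go.
	out, err := json.MarshalIndent(review, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}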

View File

@ -0,0 +1,149 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by conversion-gen. Do not edit it manually!
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
pkg_admission "k8s.io/apiserver/pkg/admission"
admission "k8s.io/kubernetes/pkg/apis/admission"
unsafe "unsafe"
)
func init() {
SchemeBuilder.Register(RegisterConversions)
}
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(scheme *runtime.Scheme) error {
return scheme.AddGeneratedConversionFuncs(
Convert_v1alpha1_AdmissionReview_To_admission_AdmissionReview,
Convert_admission_AdmissionReview_To_v1alpha1_AdmissionReview,
Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec,
Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec,
Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus,
Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus,
)
}
func autoConvert_v1alpha1_AdmissionReview_To_admission_AdmissionReview(in *AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error {
if err := Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_AdmissionReview_To_admission_AdmissionReview is an autogenerated conversion function.
func Convert_v1alpha1_AdmissionReview_To_admission_AdmissionReview(in *AdmissionReview, out *admission.AdmissionReview, s conversion.Scope) error {
return autoConvert_v1alpha1_AdmissionReview_To_admission_AdmissionReview(in, out, s)
}
func autoConvert_admission_AdmissionReview_To_v1alpha1_AdmissionReview(in *admission.AdmissionReview, out *AdmissionReview, s conversion.Scope) error {
if err := Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
if err := Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(&in.Status, &out.Status, s); err != nil {
return err
}
return nil
}
// Convert_admission_AdmissionReview_To_v1alpha1_AdmissionReview is an autogenerated conversion function.
func Convert_admission_AdmissionReview_To_v1alpha1_AdmissionReview(in *admission.AdmissionReview, out *AdmissionReview, s conversion.Scope) error {
return autoConvert_admission_AdmissionReview_To_v1alpha1_AdmissionReview(in, out, s)
}
func autoConvert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(in *AdmissionReviewSpec, out *admission.AdmissionReviewSpec, s conversion.Scope) error {
out.Kind = in.Kind
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Object, &out.Object, s); err != nil {
return err
}
if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.OldObject, &out.OldObject, s); err != nil {
return err
}
out.Operation = pkg_admission.Operation(in.Operation)
out.Name = in.Name
out.Namespace = in.Namespace
out.Resource = in.Resource
out.SubResource = in.SubResource
// TODO: Inefficient conversion - can we improve it?
if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil {
return err
}
return nil
}
// Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec is an autogenerated conversion function.
func Convert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(in *AdmissionReviewSpec, out *admission.AdmissionReviewSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_AdmissionReviewSpec_To_admission_AdmissionReviewSpec(in, out, s)
}
func autoConvert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(in *admission.AdmissionReviewSpec, out *AdmissionReviewSpec, s conversion.Scope) error {
out.Kind = in.Kind
out.Name = in.Name
out.Namespace = in.Namespace
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Object, &out.Object, s); err != nil {
return err
}
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.OldObject, &out.OldObject, s); err != nil {
return err
}
out.Operation = pkg_admission.Operation(in.Operation)
out.Resource = in.Resource
out.SubResource = in.SubResource
// TODO: Inefficient conversion - can we improve it?
if err := s.Convert(&in.UserInfo, &out.UserInfo, 0); err != nil {
return err
}
return nil
}
// Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec is an autogenerated conversion function.
func Convert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(in *admission.AdmissionReviewSpec, out *AdmissionReviewSpec, s conversion.Scope) error {
return autoConvert_admission_AdmissionReviewSpec_To_v1alpha1_AdmissionReviewSpec(in, out, s)
}
func autoConvert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(in *AdmissionReviewStatus, out *admission.AdmissionReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Result = (*v1.Status)(unsafe.Pointer(in.Result))
return nil
}
// Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus is an autogenerated conversion function.
func Convert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(in *AdmissionReviewStatus, out *admission.AdmissionReviewStatus, s conversion.Scope) error {
return autoConvert_v1alpha1_AdmissionReviewStatus_To_admission_AdmissionReviewStatus(in, out, s)
}
func autoConvert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(in *admission.AdmissionReviewStatus, out *AdmissionReviewStatus, s conversion.Scope) error {
out.Allowed = in.Allowed
out.Result = (*v1.Status)(unsafe.Pointer(in.Result))
return nil
}
// Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus is an autogenerated conversion function.
func Convert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(in *admission.AdmissionReviewStatus, out *AdmissionReviewStatus, s conversion.Scope) error {
return autoConvert_admission_AdmissionReviewStatus_To_v1alpha1_AdmissionReviewStatus(in, out, s)
}
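
These generated conversions are normally reached through the scheme rather than called directly. A hedged sketch, assuming both the internal and v1alpha1 packages have already been added to the scheme:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	internaladmission "k8s.io/kubernetes/pkg/apis/admission"
	admissionv1alpha1 "k8s.io/kubernetes/pkg/apis/admission/v1alpha1"
)

// toInternal converts a versioned review into the internal form; the scheme
// dispatches to the generated Convert_v1alpha1_* functions registered above.
func toInternal(scheme *runtime.Scheme, in *admissionv1alpha1.AdmissionReview) (*internaladmission.AdmissionReview, error) {
	out := &internaladmission.AdmissionReview{}
	if err := scheme.Convert(in, out, nil); err != nil {
		return nil, err
	}
	return out, nil
}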

View File

@ -0,0 +1,100 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package v1alpha1
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
v1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
reflect "reflect"
)
func init() {
SchemeBuilder.Register(RegisterDeepCopies)
}
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
return scheme.AddGeneratedDeepCopyFuncs(
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_AdmissionReview, InType: reflect.TypeOf(&AdmissionReview{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_AdmissionReviewSpec, InType: reflect.TypeOf(&AdmissionReviewSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_v1alpha1_AdmissionReviewStatus, InType: reflect.TypeOf(&AdmissionReviewStatus{})},
)
}
// DeepCopy_v1alpha1_AdmissionReview is an autogenerated deepcopy function.
func DeepCopy_v1alpha1_AdmissionReview(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*AdmissionReview)
out := out.(*AdmissionReview)
*out = *in
if err := DeepCopy_v1alpha1_AdmissionReviewSpec(&in.Spec, &out.Spec, c); err != nil {
return err
}
if err := DeepCopy_v1alpha1_AdmissionReviewStatus(&in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
}
// DeepCopy_v1alpha1_AdmissionReviewSpec is an autogenerated deepcopy function.
func DeepCopy_v1alpha1_AdmissionReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*AdmissionReviewSpec)
out := out.(*AdmissionReviewSpec)
*out = *in
if newVal, err := c.DeepCopy(&in.Object); err != nil {
return err
} else {
out.Object = *newVal.(*runtime.RawExtension)
}
if newVal, err := c.DeepCopy(&in.OldObject); err != nil {
return err
} else {
out.OldObject = *newVal.(*runtime.RawExtension)
}
if err := v1.DeepCopy_v1_UserInfo(&in.UserInfo, &out.UserInfo, c); err != nil {
return err
}
return nil
}
}
// DeepCopy_v1alpha1_AdmissionReviewStatus is an autogenerated deepcopy function.
func DeepCopy_v1alpha1_AdmissionReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*AdmissionReviewStatus)
out := out.(*AdmissionReviewStatus)
*out = *in
if in.Result != nil {
in, out := &in.Result, &out.Result
if newVal, err := c.DeepCopy(*in); err != nil {
return err
} else {
*out = newVal.(*meta_v1.Status)
}
}
return nil
}
}

View File

@ -0,0 +1,32 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by defaulter-gen. Do not edit it manually!
package v1alpha1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
return nil
}

View File

@ -0,0 +1,106 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package admission
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
authentication "k8s.io/kubernetes/pkg/apis/authentication"
reflect "reflect"
)
func init() {
SchemeBuilder.Register(RegisterDeepCopies)
}
// RegisterDeepCopies adds deep-copy functions to the given scheme. Public
// to allow building arbitrary schemes.
func RegisterDeepCopies(scheme *runtime.Scheme) error {
return scheme.AddGeneratedDeepCopyFuncs(
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_admission_AdmissionReview, InType: reflect.TypeOf(&AdmissionReview{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_admission_AdmissionReviewSpec, InType: reflect.TypeOf(&AdmissionReviewSpec{})},
conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_admission_AdmissionReviewStatus, InType: reflect.TypeOf(&AdmissionReviewStatus{})},
)
}
// DeepCopy_admission_AdmissionReview is an autogenerated deepcopy function.
func DeepCopy_admission_AdmissionReview(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*AdmissionReview)
out := out.(*AdmissionReview)
*out = *in
if err := DeepCopy_admission_AdmissionReviewSpec(&in.Spec, &out.Spec, c); err != nil {
return err
}
if err := DeepCopy_admission_AdmissionReviewStatus(&in.Status, &out.Status, c); err != nil {
return err
}
return nil
}
}
// DeepCopy_admission_AdmissionReviewSpec is an autogenerated deepcopy function.
func DeepCopy_admission_AdmissionReviewSpec(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*AdmissionReviewSpec)
out := out.(*AdmissionReviewSpec)
*out = *in
// in.Object is kind 'Interface'
if in.Object != nil {
if newVal, err := c.DeepCopy(&in.Object); err != nil {
return err
} else {
out.Object = *newVal.(*runtime.Object)
}
}
// in.OldObject is kind 'Interface'
if in.OldObject != nil {
if newVal, err := c.DeepCopy(&in.OldObject); err != nil {
return err
} else {
out.OldObject = *newVal.(*runtime.Object)
}
}
if err := authentication.DeepCopy_authentication_UserInfo(&in.UserInfo, &out.UserInfo, c); err != nil {
return err
}
return nil
}
}
// DeepCopy_admission_AdmissionReviewStatus is an autogenerated deepcopy function.
func DeepCopy_admission_AdmissionReviewStatus(in interface{}, out interface{}, c *conversion.Cloner) error {
{
in := in.(*AdmissionReviewStatus)
out := out.(*AdmissionReviewStatus)
*out = *in
if in.Result != nil {
in, out := &in.Result, &out.Result
if newVal, err := c.DeepCopy(*in); err != nil {
return err
} else {
*out = newVal.(*v1.Status)
}
}
return nil
}
}

View File

@ -973,53 +973,26 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
Protocol listeners.Protocol
Port int
}
var listenerIDs []string
lbListeners := make(map[portKey]listeners.Listener)
err = listeners.List(lbaas.network, listeners.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
listenersList, err := listeners.ExtractListeners(page)
if err != nil {
return false, err
}
for _, l := range listenersList {
for _, lb := range l.Loadbalancers {
// Double check this Listener belongs to the LB we're updating. Neutron's API filtering
// can't be counted on in older releases (i.e Liberty).
if loadbalancer.ID == lb.ID {
key := portKey{Protocol: listeners.Protocol(l.Protocol), Port: l.ProtocolPort}
lbListeners[key] = l
break
}
}
}
return true, nil
})
allListeners, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID)
if err != nil {
return err
return fmt.Errorf("Error getting listeners for LB %s: %v", loadBalancerName, err)
}
for _, l := range allListeners {
key := portKey{Protocol: listeners.Protocol(l.Protocol), Port: l.ProtocolPort}
lbListeners[key] = l
listenerIDs = append(listenerIDs, l.ID)
}
// Get all pools for this loadbalancer, by listener ID.
lbPools := make(map[string]v2pools.Pool)
err = v2pools.List(lbaas.network, v2pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2pools.ExtractPools(page)
for _, listenerID := range listenerIDs {
pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listenerID)
if err != nil {
return false, err
return fmt.Errorf("Error getting pool for listener %s: %v", listenerID, err)
}
for _, p := range poolsList {
for _, l := range p.Listeners {
// Double check this Pool belongs to the LB we're deleting. Neutron's API filtering
// can't be counted on in older releases (i.e Liberty).
for _, val := range lbListeners {
if val.ID == l.ID {
lbPools[l.ID] = p
break
}
}
}
}
return true, nil
})
if err != nil {
return err
lbPools[listenerID] = *pool
}
// Compose Set of member (addresses) that _should_ exist
@ -1050,19 +1023,13 @@ func (lbaas *LbaasV2) UpdateLoadBalancer(clusterName string, service *v1.Service
}
// Find existing pool members (by address) for this port
members := make(map[string]v2pools.Member)
err := v2pools.ListMembers(lbaas.network, pool.ID, v2pools.ListMembersOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2pools.ExtractMembers(page)
if err != nil {
return false, err
}
for _, member := range membersList {
members[member.Address] = member
}
return true, nil
})
getMembers, err := getMembersByPoolID(lbaas.network, pool.ID)
if err != nil {
return err
return fmt.Errorf("Error getting pool members %s: %v", pool.ID, err)
}
members := make(map[string]v2pools.Member)
for _, member := range getMembers {
members[member.Address] = member
}
// Add any new members for this port
@ -1125,60 +1092,32 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
}
// get all listeners associated with this loadbalancer
var listenerIDs []string
err = listeners.List(lbaas.network, listeners.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
listenerList, err := listeners.ExtractListeners(page)
if err != nil {
return false, err
}
for _, listener := range listenerList {
listenerIDs = append(listenerIDs, listener.ID)
}
return true, nil
})
listenerList, err := getListenersByLoadBalancerID(lbaas.network, loadbalancer.ID)
if err != nil {
return err
return fmt.Errorf("Error getting LB %s listeners: %v", loadbalancer.ID, err)
}
// get all pools (and health monitors) associated with this loadbalancer
var poolIDs []string
var monitorIDs []string
err = v2pools.List(lbaas.network, v2pools.ListOpts{LoadbalancerID: loadbalancer.ID}).EachPage(func(page pagination.Page) (bool, error) {
poolsList, err := v2pools.ExtractPools(page)
for _, listener := range listenerList {
pool, err := getPoolByListenerID(lbaas.network, loadbalancer.ID, listener.ID)
if err != nil {
return false, err
return fmt.Errorf("Error getting pool for listener %s: %v", listener.ID, err)
}
for _, pool := range poolsList {
poolIDs = append(poolIDs, pool.ID)
monitorIDs = append(monitorIDs, pool.MonitorID)
}
return true, nil
})
if err != nil {
return err
poolIDs = append(poolIDs, pool.ID)
monitorIDs = append(monitorIDs, pool.MonitorID)
}
// get all members associated with each poolIDs
var memberIDs []string
for _, poolID := range poolIDs {
err := v2pools.ListMembers(lbaas.network, poolID, v2pools.ListMembersOpts{}).EachPage(func(page pagination.Page) (bool, error) {
membersList, err := v2pools.ExtractMembers(page)
if err != nil {
return false, err
}
for _, member := range membersList {
memberIDs = append(memberIDs, member.ID)
}
return true, nil
})
if err != nil {
return err
for _, pool := range poolIDs {
membersList, err := getMembersByPoolID(lbaas.network, pool)
if err != nil && !isNotFound(err) {
return fmt.Errorf("Error getting pool members %s: %v", pool, err)
}
for _, member := range membersList {
memberIDs = append(memberIDs, member.ID)
}
}
@ -1211,8 +1150,8 @@ func (lbaas *LbaasV2) EnsureLoadBalancerDeleted(clusterName string, service *v1.
}
// delete all listeners
for _, listenerID := range listenerIDs {
err := listeners.Delete(lbaas.network, listenerID).ExtractErr()
for _, listener := range listenerList {
err := listeners.Delete(lbaas.network, listener.ID).ExtractErr()
if err != nil && !isNotFound(err) {
return err
}
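
The hunks above rely on helpers such as getListenersByLoadBalancerID, getPoolByListenerID, and getMembersByPoolID that are introduced elsewhere in this change and not shown here. As a hedged sketch, an implementation consistent with the pagination calls being removed above might look like the following; the version actually added by this commit may differ in detail.

package openstack

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
	"github.com/gophercloud/gophercloud/pagination"
)

// getListenersByLoadBalancerID returns every listener attached to the given
// load balancer, double-checking ownership because Neutron's API filtering
// cannot be counted on in older releases (e.g. Liberty).
func getListenersByLoadBalancerID(client *gophercloud.ServiceClient, id string) ([]listeners.Listener, error) {
	var existingListeners []listeners.Listener
	err := listeners.List(client, listeners.ListOpts{LoadbalancerID: id}).EachPage(func(page pagination.Page) (bool, error) {
		listenerList, err := listeners.ExtractListeners(page)
		if err != nil {
			return false, err
		}
		for _, l := range listenerList {
			for _, lb := range l.Loadbalancers {
				if lb.ID == id {
					existingListeners = append(existingListeners, l)
					break
				}
			}
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return existingListeners, nil
}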

View File

@ -43,6 +43,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@ -54,7 +55,6 @@ go_library(
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/clock:go_default_library",
"//vendor/k8s.io/client-go/util/integer:go_default_library",
],
)
@ -80,13 +80,13 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/pkg/apis/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/clock:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)

View File

@ -34,9 +34,9 @@ import (
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/clock"
"k8s.io/client-go/util/integer"
"k8s.io/kubernetes/pkg/api"

View File

@ -30,12 +30,12 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/clock"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"

View File

@ -841,8 +841,8 @@ func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
// current with the new status of the deployment that the controller is observing. More specifically,
// when new pods are scaled up or become available, or old pods are scaled down, then we consider the
// deployment is progressing.
// when new pods are scaled up or become ready or available, or old pods are scaled down, then we
// consider the deployment to be progressing.
func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
oldStatus := deployment.Status
@ -852,6 +852,7 @@ func DeploymentProgressing(deployment *extensions.Deployment, newStatus *extensi
return (newStatus.UpdatedReplicas > oldStatus.UpdatedReplicas) ||
(newStatusOldReplicas < oldStatusOldReplicas) ||
newStatus.ReadyReplicas > deployment.Status.ReadyReplicas ||
newStatus.AvailableReplicas > deployment.Status.AvailableReplicas
}

View File

@ -1002,18 +1002,22 @@ func TestDeploymentComplete(t *testing.T) {
}
func TestDeploymentProgressing(t *testing.T) {
deployment := func(current, updated int32) *extensions.Deployment {
deployment := func(current, updated, ready, available int32) *extensions.Deployment {
return &extensions.Deployment{
Status: extensions.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
Replicas: current,
UpdatedReplicas: updated,
ReadyReplicas: ready,
AvailableReplicas: available,
},
}
}
newStatus := func(current, updated int32) extensions.DeploymentStatus {
newStatus := func(current, updated, ready, available int32) extensions.DeploymentStatus {
return extensions.DeploymentStatus{
Replicas: current,
UpdatedReplicas: updated,
Replicas: current,
UpdatedReplicas: updated,
ReadyReplicas: ready,
AvailableReplicas: available,
}
}
@ -1026,52 +1030,60 @@ func TestDeploymentProgressing(t *testing.T) {
expected bool
}{
{
name: "progressing",
name: "progressing: updated pods",
d: deployment(10, 4),
newStatus: newStatus(10, 6),
d: deployment(10, 4, 4, 4),
newStatus: newStatus(10, 6, 4, 4),
expected: true,
},
{
name: "not progressing",
d: deployment(10, 4),
newStatus: newStatus(10, 4),
d: deployment(10, 4, 4, 4),
newStatus: newStatus(10, 4, 4, 4),
expected: false,
},
{
name: "progressing #2",
name: "progressing: old pods removed",
d: deployment(10, 4),
newStatus: newStatus(8, 4),
d: deployment(10, 4, 6, 6),
newStatus: newStatus(8, 4, 6, 6),
expected: true,
},
{
name: "not progressing #2",
name: "not progressing: less new pods",
d: deployment(10, 7),
newStatus: newStatus(10, 6),
d: deployment(10, 7, 3, 3),
newStatus: newStatus(10, 6, 3, 3),
expected: false,
},
{
name: "progressing #3",
name: "progressing: less overall but more new pods",
d: deployment(10, 4),
newStatus: newStatus(8, 8),
d: deployment(10, 4, 7, 7),
newStatus: newStatus(8, 8, 5, 5),
expected: true,
},
{
name: "not progressing #2",
name: "progressing: more ready pods",
d: deployment(10, 7),
newStatus: newStatus(10, 7),
d: deployment(10, 10, 9, 8),
newStatus: newStatus(10, 10, 10, 8),
expected: false,
expected: true,
},
{
name: "progressing: more available pods",
d: deployment(10, 10, 10, 9),
newStatus: newStatus(10, 10, 10, 10),
expected: true,
},
}

View File

@ -38,6 +38,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@ -46,7 +47,6 @@ go_library(
"//vendor/k8s.io/client-go/dynamic:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/util/clock:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
],
)

Some files were not shown because too many files have changed in this diff