Merge branch 'master' into master
CHANGELOG.md (2165 changes)
File diff suppressed because it is too large.
Godeps/Godeps.json (282 changes, generated)
@@ -30,61 +30,71 @@
   },
   {
     "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/compute",
-    "Comment": "v7.0.1-beta",
-    "Rev": "0984e0641ae43b89283223034574d6465be93bf4"
+    "Comment": "v10.0.4-beta-1-g786cc84",
+    "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
   },
   {
     "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/containerregistry",
-    "Comment": "v7.0.1-beta",
-    "Rev": "0984e0641ae43b89283223034574d6465be93bf4"
+    "Comment": "v10.0.4-beta-1-g786cc84",
+    "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
   },
+  {
+    "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/disk",
+    "Comment": "v10.0.4-beta-1-g786cc84",
+    "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
+  },
   {
     "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/network",
-    "Comment": "v7.0.1-beta",
-    "Rev": "0984e0641ae43b89283223034574d6465be93bf4"
+    "Comment": "v10.0.4-beta-1-g786cc84",
+    "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
   },
   {
     "ImportPath": "github.com/Azure/azure-sdk-for-go/arm/storage",
-    "Comment": "v7.0.1-beta",
-    "Rev": "0984e0641ae43b89283223034574d6465be93bf4"
+    "Comment": "v10.0.4-beta-1-g786cc84",
+    "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
   },
   {
     "ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
-    "Comment": "v7.0.1-beta",
-    "Rev": "0984e0641ae43b89283223034574d6465be93bf4"
+    "Comment": "v10.0.4-beta-1-g786cc84",
+    "Rev": "786cc84138518bf7fd6d60e92fad1ac9d1a117ad"
   },
   {
     "ImportPath": "github.com/Azure/go-ansiterm",
-    "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
+    "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e"
   },
   {
     "ImportPath": "github.com/Azure/go-ansiterm/winterm",
-    "Rev": "70b2c90b260171e829f1ebd7c17f600c11858dbe"
+    "Rev": "fa152c58bc15761d0200cb75fe958b89a9d4888e"
   },
   {
     "ImportPath": "github.com/Azure/go-autorest/autorest",
-    "Comment": "v7.2.3",
-    "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
+    "Comment": "v8.0.0",
+    "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
   },
+  {
+    "ImportPath": "github.com/Azure/go-autorest/autorest/adal",
+    "Comment": "v8.0.0",
+    "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
+  },
   {
     "ImportPath": "github.com/Azure/go-autorest/autorest/azure",
-    "Comment": "v7.2.3",
-    "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
+    "Comment": "v8.0.0",
+    "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
   },
   {
     "ImportPath": "github.com/Azure/go-autorest/autorest/date",
-    "Comment": "v7.2.3",
-    "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
+    "Comment": "v8.0.0",
+    "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
   },
   {
     "ImportPath": "github.com/Azure/go-autorest/autorest/to",
-    "Comment": "v7.2.3",
-    "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
+    "Comment": "v8.0.0",
+    "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
   },
   {
     "ImportPath": "github.com/Azure/go-autorest/autorest/validation",
-    "Comment": "v7.2.3",
-    "Rev": "d7c034a8af24eda120dd6460bfcd6d9ed14e43ca"
+    "Comment": "v8.0.0",
+    "Rev": "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d"
   },
   {
     "ImportPath": "github.com/MakeNowJust/heredoc",
@@ -95,6 +105,10 @@
     "Comment": "v0.4.2",
     "Rev": "f533f7a102197536779ea3a8cb881d639e21ec5a"
   },
+  {
+    "ImportPath": "github.com/NYTimes/gziphandler",
+    "Rev": "56545f4a5d46df9a6648819d1664c3a03a13ffdb"
+  },
   {
     "ImportPath": "github.com/PuerkitoBio/purell",
     "Comment": "v1.0.0",
@@ -411,7 +425,7 @@
   },
   {
     "ImportPath": "github.com/codegangsta/negroni",
-    "Comment": "v0.1-62-g8d75e11",
+    "Comment": "v0.1.0-62-g8d75e11",
     "Rev": "8d75e11374a1928608c906fe745b538483e7aeb2"
   },
   {
@@ -810,7 +824,8 @@
   },
   {
     "ImportPath": "github.com/davecgh/go-spew/spew",
-    "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d"
+    "Comment": "v1.1.0-1-g782f496",
+    "Rev": "782f4967f2dc4564575ca782fe2d04090b5faca8"
   },
   {
     "ImportPath": "github.com/daviddengcn/go-colortext",
@@ -831,50 +846,130 @@
     "Comment": "v2.4.0-rc.1-38-gcd27f179",
     "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
   },
+  {
+    "ImportPath": "github.com/docker/docker/api/types",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/blkiodev",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/container",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/events",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/filters",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/mount",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/network",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/reference",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/registry",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/strslice",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/swarm",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/time",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/versions",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/api/types/volume",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
+  {
+    "ImportPath": "github.com/docker/docker/client",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
   {
     "ImportPath": "github.com/docker/docker/pkg/jsonlog",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/jsonmessage",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/longpath",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/mount",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/stdcopy",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/symlink",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/system",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/term",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
   {
     "ImportPath": "github.com/docker/docker/pkg/term/windows",
-    "Comment": "v1.11.2",
-    "Rev": "b9f10c951893f9a00865890a5232e85d770c1087"
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
   },
+  {
+    "ImportPath": "github.com/docker/docker/pkg/tlsconfig",
+    "Comment": "v1.13.1-rc2",
+    "Rev": "54f71fd84a0dabab9d45f5fe7543a028b1200ca1"
+  },
   {
     "ImportPath": "github.com/docker/engine-api/client",
@@ -1683,18 +1778,18 @@
   },
   {
     "ImportPath": "github.com/heketi/heketi/client/api/go-client",
-    "Comment": "v4.0.0-22-g7a54b6f",
-    "Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2"
+    "Comment": "v4.0.0-95-gaaf4061",
+    "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b"
   },
   {
     "ImportPath": "github.com/heketi/heketi/pkg/glusterfs/api",
-    "Comment": "v4.0.0-22-g7a54b6f",
-    "Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2"
+    "Comment": "v4.0.0-95-gaaf4061",
+    "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b"
   },
   {
     "ImportPath": "github.com/heketi/heketi/pkg/utils",
-    "Comment": "v4.0.0-22-g7a54b6f",
-    "Rev": "7a54b6fc903feab1e7cb6573177ca09b544eb1e2"
+    "Comment": "v4.0.0-95-gaaf4061",
+    "Rev": "aaf40619d85fda757e7a1c1ea1b5118cea65594b"
   },
   {
     "ImportPath": "github.com/howeyc/gopass",
@@ -1820,31 +1915,6 @@
     "ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
     "Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
   },
-  {
-    "ImportPath": "github.com/mesos/mesos-go/detector",
-    "Comment": "before-0.26-protos-33-g45c8b08",
-    "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
-  },
-  {
-    "ImportPath": "github.com/mesos/mesos-go/detector/zoo",
-    "Comment": "before-0.26-protos-33-g45c8b08",
-    "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
-  },
-  {
-    "ImportPath": "github.com/mesos/mesos-go/mesosproto",
-    "Comment": "before-0.26-protos-33-g45c8b08",
-    "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
-  },
-  {
-    "ImportPath": "github.com/mesos/mesos-go/mesosutil",
-    "Comment": "before-0.26-protos-33-g45c8b08",
-    "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
-  },
-  {
-    "ImportPath": "github.com/mesos/mesos-go/upid",
-    "Comment": "before-0.26-protos-33-g45c8b08",
-    "Rev": "45c8b08e9af666add36a6f93ff8c1c75812367b0"
-  },
   {
     "ImportPath": "github.com/miekg/coredns/middleware/etcd/msg",
     "Comment": "v003",
@@ -2077,82 +2147,82 @@
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/apparmor",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/systemd",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/configs/validate",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/criurpc",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/keys",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/label",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/seccomp",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/selinux",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/stacktrace",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/system",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/user",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
     "ImportPath": "github.com/opencontainers/runc/libcontainer/utils",
-    "Comment": "v1.0.0-rc2-49-gd223e2ad",
+    "Comment": "v1.0.0-rc2-49-gd223e2a",
     "Rev": "d223e2adae83f62d58448a799a5da05730228089"
   },
   {
@@ -2161,6 +2231,7 @@
   },
   {
     "ImportPath": "github.com/pelletier/go-buffruneio",
+    "Comment": "v0.1.0",
     "Rev": "df1e16fde7fc330a0ca68167c23bf7ed6ac31d6d"
   },
   {
@@ -2218,7 +2289,7 @@
   },
   {
     "ImportPath": "github.com/quobyte/api",
-    "Rev": "bf713b5a4333f44504fa1ce63690de45cfed6413"
+    "Rev": "cb10db90715b14d4784465d2fa3b915dfacc0628"
   },
   {
     "ImportPath": "github.com/rackspace/gophercloud",
@@ -2350,8 +2421,9 @@
     "Rev": "300106c228d52c8941d4b3de6054a6062a86dda3"
   },
   {
-    "ImportPath": "github.com/samuel/go-zookeeper/zk",
-    "Rev": "177002e16a0061912f02377e2dd8951a8b3551bc"
+    "ImportPath": "github.com/satori/uuid",
+    "Comment": "v1.1.0-8-g5bf94b6",
+    "Rev": "5bf94b69c6b68ee1b541973bb8e1144db23a194b"
   },
   {
     "ImportPath": "github.com/seccomp/libseccomp-golang",
@@ -2423,18 +2495,18 @@
   },
   {
     "ImportPath": "github.com/stretchr/testify/assert",
-    "Comment": "v1.0-88-ge3a8ff8",
-    "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059"
+    "Comment": "v1.1.4-66-gf6abca5",
+    "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a"
   },
   {
     "ImportPath": "github.com/stretchr/testify/mock",
-    "Comment": "v1.0-88-ge3a8ff8",
-    "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059"
+    "Comment": "v1.1.4-66-gf6abca5",
+    "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a"
   },
   {
     "ImportPath": "github.com/stretchr/testify/require",
-    "Comment": "v1.0-88-ge3a8ff8",
-    "Rev": "e3a8ff8ce36581f87a15341206f205b1da467059"
+    "Comment": "v1.1.4-66-gf6abca5",
+    "Rev": "f6abca593680b2315d2075e0f5e2a9751e3f431a"
   },
   {
     "ImportPath": "github.com/syndtr/gocapability/capability",
@@ -2568,6 +2640,7 @@
   },
   {
     "ImportPath": "github.com/xiang90/probing",
+    "Comment": "0.0.1",
     "Rev": "07dd2e8dfe18522e9c447ba95f2fe95262f63bb2"
   },
   {
@@ -2779,10 +2852,18 @@
     "ImportPath": "golang.org/x/tools/container/intsets",
     "Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
   },
+  {
+    "ImportPath": "google.golang.org/api/cloudkms/v1",
+    "Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
+  },
   {
     "ImportPath": "google.golang.org/api/cloudmonitoring/v2beta2",
     "Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
   },
+  {
+    "ImportPath": "google.golang.org/api/compute/v0.alpha",
+    "Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
+  },
   {
     "ImportPath": "google.golang.org/api/compute/v0.beta",
     "Rev": "e3824ed33c72bf7e81da0286772c34b987520914"
@@ -2870,22 +2951,27 @@
   },
   {
     "ImportPath": "gopkg.in/gcfg.v1",
+    "Comment": "v1.0.0",
     "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
   },
   {
     "ImportPath": "gopkg.in/gcfg.v1/scanner",
+    "Comment": "v1.0.0",
     "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
   },
   {
     "ImportPath": "gopkg.in/gcfg.v1/token",
+    "Comment": "v1.0.0",
     "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
   },
   {
     "ImportPath": "gopkg.in/gcfg.v1/types",
+    "Comment": "v1.0.0",
     "Rev": "083575c3955c85df16fe9590cceab64d03f5eb6e"
   },
   {
     "ImportPath": "gopkg.in/inf.v0",
+    "Comment": "v0.9.0",
     "Rev": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4"
   },
   {
Godeps/LICENSES (4773 changes, generated)
File diff suppressed because it is too large.
@@ -3,11 +3,13 @@ aliases:
   - davidopp
   - timothysc
   - wojtek-t
   - k82cn
   sig-scheduling:
   - davidopp
+  - bsalamat
   - timothysc
   - wojtek-t
-  - k82cn
+  - k82cn
+  - jayunit100
   sig-cli-maintainers:
   - adohe
@@ -52,7 +54,7 @@ aliases:
   - pmorie
   - resouer
   - sjpotter
-  - timstclair
+  - tallclair
   - tmrts
   - vishh
   - yifan-gu
@@ -17,7 +17,6 @@ reviewers:
 - mikedanese
 - liggitt
 - nikhiljindal
 - bprashanth
 - gmarek
 - erictune
 - davidopp
@@ -33,7 +32,7 @@ reviewers:
 - pwittrock
 - roberthbailey
 - ncdc
-- timstclair
+- tallclair
 - yifan-gu
 - eparis
 - mwielgus
@@ -33551,450 +33551,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/extensions/v1beta1/thirdpartyresources": {
|
||||
"get": {
|
||||
"description": "list or watch objects of kind ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "listExtensionsV1beta1ThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"name": "fieldSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"name": "includeUninitialized",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"name": "labelSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"name": "resourceVersion",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "integer",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"name": "timeoutSeconds",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"name": "watch",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResourceList"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "list",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"description": "create a ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "createExtensionsV1beta1ThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "post",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"delete": {
|
||||
"description": "delete collection of ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "deleteExtensionsV1beta1CollectionThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"name": "fieldSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"name": "includeUninitialized",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"name": "labelSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"name": "resourceVersion",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "integer",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"name": "timeoutSeconds",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"name": "watch",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "deletecollection",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"name": "pretty",
|
||||
"in": "query"
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/extensions/v1beta1/thirdpartyresources/{name}": {
|
||||
"get": {
|
||||
"description": "read the specified ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "readExtensionsV1beta1ThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.",
|
||||
"name": "exact",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Should this value be exported. Export strips fields that a user can not specify.",
|
||||
"name": "export",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "get",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"description": "replace the specified ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "replaceExtensionsV1beta1ThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "put",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"delete": {
|
||||
"description": "delete a ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "deleteExtensionsV1beta1ThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
|
||||
}
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "integer",
|
||||
"description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
|
||||
"name": "gracePeriodSeconds",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
|
||||
"name": "orphanDependents",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.",
|
||||
"name": "propagationPolicy",
|
||||
"in": "query"
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "delete",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"patch": {
|
||||
"description": "partially update the specified ThirdPartyResource",
|
||||
"consumes": [
|
||||
"application/json-patch+json",
|
||||
"application/merge-patch+json",
|
||||
"application/strategic-merge-patch+json"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "patchExtensionsV1beta1ThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"name": "body",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "patch",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"name": "pretty",
|
||||
"in": "query"
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/extensions/v1beta1/watch/daemonsets": {
|
||||
"get": {
|
||||
"description": "watch individual changes to a list of DaemonSet",
|
||||
@@ -35653,194 +35209,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/extensions/v1beta1/watch/thirdpartyresources": {
|
||||
"get": {
|
||||
"description": "watch individual changes to a list of ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "watchExtensionsV1beta1ThirdPartyResourceList",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "watchlist",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"name": "fieldSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"name": "includeUninitialized",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"name": "labelSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"name": "pretty",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"name": "resourceVersion",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "integer",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"name": "timeoutSeconds",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"name": "watch",
|
||||
"in": "query"
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/extensions/v1beta1/watch/thirdpartyresources/{name}": {
|
||||
"get": {
|
||||
"description": "watch changes to an object of kind ThirdPartyResource",
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"extensions_v1beta1"
|
||||
],
|
||||
"operationId": "watchExtensionsV1beta1ThirdPartyResource",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-action": "watch",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"name": "fieldSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"name": "includeUninitialized",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"name": "labelSelector",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"name": "name",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"name": "pretty",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "string",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"name": "resourceVersion",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "integer",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"name": "timeoutSeconds",
|
||||
"in": "query"
|
||||
},
|
||||
{
|
||||
"uniqueItems": true,
|
||||
"type": "boolean",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"name": "watch",
|
||||
"in": "query"
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/networking.k8s.io/": {
|
||||
"get": {
|
||||
"description": "get information of a group",
|
||||
@@ -52779,15 +52147,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.extensions.v1beta1.APIVersion": {
|
||||
"description": "An APIVersion represents a single concrete version of an object model.",
|
||||
"properties": {
|
||||
"name": {
|
||||
"description": "Name of this version (e.g. 'v1').",
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.extensions.v1beta1.DaemonSet": {
|
||||
"description": "DaemonSet represents the configuration of a daemon set.",
|
||||
"properties": {
|
||||
@@ -54008,75 +53367,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"io.k8s.api.extensions.v1beta1.ThirdPartyResource": {
|
||||
"description": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"description": "Description is the description of this object.",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Standard object metadata",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
|
||||
},
|
||||
"versions": {
|
||||
"description": "Versions are versions for this third party object",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.APIVersion"
|
||||
}
|
||||
}
|
||||
},
|
||||
"x-kubernetes-group-version-kind": [
|
||||
{
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResource"
|
||||
}
|
||||
]
|
||||
},
|
||||
"io.k8s.api.extensions.v1beta1.ThirdPartyResourceList": {
|
||||
"description": "ThirdPartyResourceList is a list of ThirdPartyResources.",
|
||||
"required": [
|
||||
"items"
|
||||
],
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources",
|
||||
"type": "string"
|
||||
},
|
||||
"items": {
|
||||
"description": "Items is the list of ThirdPartyResources.",
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.extensions.v1beta1.ThirdPartyResource"
|
||||
}
|
||||
},
|
||||
"kind": {
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"description": "Standard list metadata.",
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
|
||||
}
|
||||
},
|
||||
"x-kubernetes-group-version-kind": [
|
||||
{
|
||||
"group": "extensions",
|
||||
"version": "v1beta1",
|
||||
"kind": "ThirdPartyResourceList"
|
||||
}
|
||||
]
|
||||
},
|
||||
"io.k8s.api.networking.v1.NetworkPolicy": {
|
||||
"description": "NetworkPolicy describes what network traffic is allowed for a set of Pods",
|
||||
"properties": {
|
||||
|
@@ -6144,621 +6144,6 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/apis/extensions/v1beta1/thirdpartyresources",
|
||||
"description": "API at /apis/extensions/v1beta1",
|
||||
"operations": [
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResourceList",
|
||||
"method": "GET",
|
||||
"summary": "list or watch objects of kind ThirdPartyResource",
|
||||
"nickname": "listThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "labelSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "fieldSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "includeUninitialized",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "watch",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "resourceVersion",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"paramType": "query",
|
||||
"name": "timeoutSeconds",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1beta1.ThirdPartyResourceList"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResource",
|
||||
"method": "POST",
|
||||
"summary": "create a ThirdPartyResource",
|
||||
"nickname": "createThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResource",
|
||||
"paramType": "body",
|
||||
"name": "body",
|
||||
"description": "",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1beta1.ThirdPartyResource"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "v1.Status",
|
||||
"method": "DELETE",
|
||||
"summary": "delete collection of ThirdPartyResource",
|
||||
"nickname": "deletecollectionThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "labelSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "fieldSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "includeUninitialized",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "watch",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "resourceVersion",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"paramType": "query",
|
||||
"name": "timeoutSeconds",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1.Status"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/apis/extensions/v1beta1/watch/thirdpartyresources",
|
||||
"description": "API at /apis/extensions/v1beta1",
|
||||
"operations": [
|
||||
{
|
||||
"type": "v1.WatchEvent",
|
||||
"method": "GET",
|
||||
"summary": "watch individual changes to a list of ThirdPartyResource",
|
||||
"nickname": "watchThirdPartyResourceList",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "labelSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "fieldSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "includeUninitialized",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "watch",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "resourceVersion",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"paramType": "query",
|
||||
"name": "timeoutSeconds",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1.WatchEvent"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/apis/extensions/v1beta1/thirdpartyresources/{name}",
|
||||
"description": "API at /apis/extensions/v1beta1",
|
||||
"operations": [
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResource",
|
||||
"method": "GET",
|
||||
"summary": "read the specified ThirdPartyResource",
|
||||
"nickname": "readThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "export",
|
||||
"description": "Should this value be exported. Export strips fields that a user can not specify.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "exact",
|
||||
"description": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "path",
|
||||
"name": "name",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1beta1.ThirdPartyResource"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResource",
|
||||
"method": "PUT",
|
||||
"summary": "replace the specified ThirdPartyResource",
|
||||
"nickname": "replaceThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResource",
|
||||
"paramType": "body",
|
||||
"name": "body",
|
||||
"description": "",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "path",
|
||||
"name": "name",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1beta1.ThirdPartyResource"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "v1beta1.ThirdPartyResource",
|
||||
"method": "PATCH",
|
||||
"summary": "partially update the specified ThirdPartyResource",
|
||||
"nickname": "patchThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "v1.Patch",
|
||||
"paramType": "body",
|
||||
"name": "body",
|
||||
"description": "",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "path",
|
||||
"name": "name",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1beta1.ThirdPartyResource"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"consumes": [
|
||||
"application/json-patch+json",
|
||||
"application/merge-patch+json",
|
||||
"application/strategic-merge-patch+json"
|
||||
]
|
||||
},
|
||||
{
|
||||
"type": "v1.Status",
|
||||
"method": "DELETE",
|
||||
"summary": "delete a ThirdPartyResource",
|
||||
"nickname": "deleteThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "v1.DeleteOptions",
|
||||
"paramType": "body",
|
||||
"name": "body",
|
||||
"description": "",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"paramType": "query",
|
||||
"name": "gracePeriodSeconds",
|
||||
"description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "orphanDependents",
|
||||
"description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "propagationPolicy",
|
||||
"description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "path",
|
||||
"name": "name",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1.Status"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"path": "/apis/extensions/v1beta1/watch/thirdpartyresources/{name}",
|
||||
"description": "API at /apis/extensions/v1beta1",
|
||||
"operations": [
|
||||
{
|
||||
"type": "v1.WatchEvent",
|
||||
"method": "GET",
|
||||
"summary": "watch changes to an object of kind ThirdPartyResource",
|
||||
"nickname": "watchThirdPartyResource",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "pretty",
|
||||
"description": "If 'true', then the output is pretty printed.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "labelSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "fieldSelector",
|
||||
"description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "includeUninitialized",
|
||||
"description": "If true, partially initialized resources are included in the response.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "boolean",
|
||||
"paramType": "query",
|
||||
"name": "watch",
|
||||
"description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "query",
|
||||
"name": "resourceVersion",
|
||||
"description": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "integer",
|
||||
"paramType": "query",
|
||||
"name": "timeoutSeconds",
|
||||
"description": "Timeout for the list/watch call.",
|
||||
"required": false,
|
||||
"allowMultiple": false
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"paramType": "path",
|
||||
"name": "name",
|
||||
"description": "name of the ThirdPartyResource",
|
||||
"required": true,
|
||||
"allowMultiple": false
|
||||
}
|
||||
],
|
||||
"responseMessages": [
|
||||
{
|
||||
"code": 200,
|
||||
"message": "OK",
|
||||
"responseModel": "v1.WatchEvent"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"consumes": [
|
||||
"*/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
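The watch parameters spelled out above (`resourceVersion`, `timeoutSeconds`) and the `…;stream=watch` media types are easiest to see with a raw request. A minimal sketch, assuming a local `kubectl proxy` on port 8001 and a ThirdPartyResource named `cron-tab.stable.example.com` (both made up for illustration); each response line is one `v1.WatchEvent`, matching the `responseModel` declared above:

```console
$ kubectl proxy --port=8001 &
# resourceVersion=0 lets the server answer from its cache; timeoutSeconds bounds
# how long the watch connection stays open.
$ curl "http://127.0.0.1:8001/apis/extensions/v1beta1/watch/thirdpartyresources/cron-tab.stable.example.com?resourceVersion=0&timeoutSeconds=30"
{"type":"ADDED","object":{"kind":"ThirdPartyResource","apiVersion":"extensions/v1beta1", ...}}
```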
{
|
||||
"path": "/apis/extensions/v1beta1",
|
||||
"description": "API at /apis/extensions/v1beta1",
|
||||
@@ -10300,73 +9685,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1beta1.ThirdPartyResourceList": {
|
||||
"id": "v1beta1.ThirdPartyResourceList",
|
||||
"description": "ThirdPartyResourceList is a list of ThirdPartyResources.",
|
||||
"required": [
|
||||
"items"
|
||||
],
|
||||
"properties": {
|
||||
"kind": {
|
||||
"type": "string",
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
|
||||
},
|
||||
"apiVersion": {
|
||||
"type": "string",
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources"
|
||||
},
|
||||
"metadata": {
|
||||
"$ref": "v1.ListMeta",
|
||||
"description": "Standard list metadata."
|
||||
},
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "v1beta1.ThirdPartyResource"
|
||||
},
|
||||
"description": "Items is the list of ThirdPartyResources."
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1beta1.ThirdPartyResource": {
|
||||
"id": "v1beta1.ThirdPartyResource",
|
||||
"description": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.",
|
||||
"properties": {
|
||||
"kind": {
|
||||
"type": "string",
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
|
||||
},
|
||||
"apiVersion": {
|
||||
"type": "string",
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources"
|
||||
},
|
||||
"metadata": {
|
||||
"$ref": "v1.ObjectMeta",
|
||||
"description": "Standard object metadata"
|
||||
},
|
||||
"description": {
|
||||
"type": "string",
|
||||
"description": "Description is the description of this object."
|
||||
},
|
||||
"versions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "v1beta1.APIVersion"
|
||||
},
|
||||
"description": "Versions are versions for this third party object"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1beta1.APIVersion": {
|
||||
"id": "v1beta1.APIVersion",
|
||||
"description": "An APIVersion represents a single concrete version of an object model.",
|
||||
"properties": {
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Name of this version (e.g. 'v1')."
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.APIResourceList": {
|
||||
"id": "v1.APIResourceList",
|
||||
"description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
|
||||
|
21 build/BUILD
@@ -21,36 +21,23 @@ filegroup(
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
||||
docker_build(
|
||||
name = "busybox",
|
||||
debs = [
|
||||
"@busybox_deb//file",
|
||||
],
|
||||
symlinks = {
|
||||
"/bin/sh": "/bin/busybox",
|
||||
"/usr/bin/busybox": "/bin/busybox",
|
||||
"/usr/sbin/busybox": "/bin/busybox",
|
||||
"/sbin/busybox": "/bin/busybox",
|
||||
},
|
||||
)
|
||||
|
||||
# This list should roughly match kube::build::get_docker_wrapped_binaries()
|
||||
# in build/common.sh.
|
||||
DOCKERIZED_BINARIES = {
|
||||
"cloud-controller-manager": {
|
||||
"base": ":busybox",
|
||||
"base": "@official_busybox//image:image.tar",
|
||||
"target": "//cmd/cloud-controller-manager:cloud-controller-manager",
|
||||
},
|
||||
"kube-apiserver": {
|
||||
"base": ":busybox",
|
||||
"base": "@official_busybox//image:image.tar",
|
||||
"target": "//cmd/kube-apiserver:kube-apiserver",
|
||||
},
|
||||
"kube-controller-manager": {
|
||||
"base": ":busybox",
|
||||
"base": "@official_busybox//image:image.tar",
|
||||
"target": "//cmd/kube-controller-manager:kube-controller-manager",
|
||||
},
|
||||
"kube-scheduler": {
|
||||
"base": ":busybox",
|
||||
"base": "@official_busybox//image:image.tar",
|
||||
"target": "//plugin/cmd/kube-scheduler:kube-scheduler",
|
||||
},
|
||||
"kube-proxy": {
|
||||
|
@@ -384,7 +384,13 @@ function kube::build::short_hash() {
# a workaround for bug https://github.com/docker/docker/issues/3968.
function kube::build::destroy_container() {
  "${DOCKER[@]}" kill "$1" >/dev/null 2>&1 || true
  "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
  if [[ $("${DOCKER[@]}" version --format '{{.Server.Version}}') = 17.06.0* ]]; then
    # Workaround https://github.com/moby/moby/issues/33948.
    # TODO: remove when 17.06.0 is not relevant anymore
    DOCKER_API_VERSION=v1.29 "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
  else
    "${DOCKER[@]}" wait "$1" >/dev/null 2>&1 || true
  fi
  "${DOCKER[@]}" rm -f -v "$1" >/dev/null 2>&1 || true
}
|
||||
|
@@ -126,6 +126,21 @@ verify: verify_generated_files
|
||||
KUBE_VERIFY_GIT_BRANCH=$(BRANCH) hack/make-rules/verify.sh -v
|
||||
endif
|
||||
|
||||
define QUICK_VERIFY_HELP_INFO
|
||||
# Runs only the presubmission verifications that aren't slow.
|
||||
#
|
||||
# Example:
|
||||
# make quick-verify
|
||||
endef
|
||||
.PHONY: quick-verify
|
||||
ifeq ($(PRINT_HELP),y)
|
||||
quick-verify:
|
||||
@echo "$$QUICK_VERIFY_HELP_INFO"
|
||||
else
|
||||
quick-verify: verify_generated_files
|
||||
hack/make-rules/verify.sh -v -Q
|
||||
endif
|
||||
|
||||
define UPDATE_HELP_INFO
|
||||
# Runs all the generated updates.
|
||||
#
|
||||
@@ -240,6 +255,11 @@ define TEST_E2E_NODE_HELP_INFO
|
||||
# IMAGE_SERVICE_ENDPOINT: remote image endpoint to connect to, to prepull images.
|
||||
# Used when RUNTIME is set to "remote".
|
||||
# IMAGE_CONFIG_FILE: path to a file containing image configuration.
|
||||
# SYSTEM_SPEC_NAME: The name of the system spec to be used for validating the
|
||||
# image in the node conformance test. The specs are located at
|
||||
# test/e2e_node/system/specs/. For example, "SYSTEM_SPEC_NAME=gke" will use
|
||||
# the spec at test/e2e_node/system/specs/gke.yaml. If unspecified, the
|
||||
# default built-in spec (system.DefaultSpec) will be used.
|
||||
#
|
||||
# Example:
|
||||
# make test-e2e-node FOCUS=Kubelet SKIP=container
|
||||
@@ -523,6 +543,20 @@ bazel-test:
|
||||
bazel test --test_tag_filters=-integration --flaky_test_attempts=3 //cmd/... //pkg/... //federation/... //plugin/... //third_party/... //hack/... //hack:verify-all //vendor/k8s.io/...
|
||||
endif
|
||||
|
||||
ifeq ($(PRINT_HELP),y)
|
||||
define BAZEL_TEST_INTEGRATION_HELP_INFO
|
||||
# Integration test with bazel
|
||||
#
|
||||
# Example:
|
||||
# make bazel-test-integration
|
||||
endef
|
||||
bazel-test-integration:
|
||||
@echo "$$BAZEL_TEST_INTEGRATION_HELP_INFO"
|
||||
else
|
||||
bazel-test-integration:
|
||||
bazel test //test/integration/...
|
||||
endif
|
||||
|
||||
ifeq ($(PRINT_HELP),y)
|
||||
define BAZEL_BUILD_HELP_INFO
|
||||
# Build release tars with bazel
|
||||
|
@@ -12,6 +12,16 @@ http_archive(
|
||||
urls = ["https://github.com/kubernetes/repo-infra/archive/9dedd5f4093884c133ad5ea73695b28338b954ab.tar.gz"],
|
||||
)
|
||||
|
||||
ETCD_VERSION = "3.0.17"
|
||||
|
||||
new_http_archive(
|
||||
name = "com_coreos_etcd",
|
||||
build_file = "third_party/etcd.BUILD",
|
||||
sha256 = "274c46a7f8d26f7ae99d6880610f54933cbcf7f3beafa19236c52eb5df8c7a0b",
|
||||
strip_prefix = "etcd-v%s-linux-amd64" % ETCD_VERSION,
|
||||
urls = ["https://github.com/coreos/etcd/releases/download/v%s/etcd-v%s-linux-amd64.tar.gz" % (ETCD_VERSION, ETCD_VERSION)],
|
||||
)
|
||||
|
||||
# This contains a patch to not prepend ./ to tarfiles produced by pkg_tar.
|
||||
# When merged upstream, we'll no longer need to use ixdy's fork:
|
||||
# https://bazel-review.googlesource.com/#/c/10390/
|
||||
@@ -24,9 +34,9 @@ http_archive(
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_docker",
|
||||
sha256 = "261fbd8fda1d06a12a0479019b46acd302c6aaa8df8e49383dc37917f20492a1",
|
||||
strip_prefix = "rules_docker-52d9faf209ff6d16eb850b6b66d03483735e0633",
|
||||
urls = ["https://github.com/bazelbuild/rules_docker/archive/52d9faf209ff6d16eb850b6b66d03483735e0633.tar.gz"],
|
||||
sha256 = "40d780165c0b9fbb3ddca858df7347381af0e87e430c74863e4ce9d6f6441023",
|
||||
strip_prefix = "rules_docker-8359263f35227a3634ea023ff4ae163189eb4b26",
|
||||
urls = ["https://github.com/bazelbuild/rules_docker/archive/8359263f35227a3634ea023ff4ae163189eb4b26.tar.gz"],
|
||||
)
|
||||
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_repositories")
|
||||
@@ -38,22 +48,6 @@ go_repositories(
|
||||
|
||||
docker_repositories()
|
||||
|
||||
# for building docker base images
|
||||
debs = (
|
||||
(
|
||||
"busybox_deb",
|
||||
"5f81f140777454e71b9e5bfdce9c89993de5ddf4a7295ea1cfda364f8f630947",
|
||||
"http://ftp.us.debian.org/debian/pool/main/b/busybox/busybox-static_1.22.0-19+b3_amd64.deb",
|
||||
"https://storage.googleapis.com/kubernetes-release/debs/busybox-static_1.22.0-19+b3_amd64.deb",
|
||||
),
|
||||
)
|
||||
|
||||
[http_file(
|
||||
name = name,
|
||||
sha256 = sha256,
|
||||
url = url,
|
||||
) for name, sha256, origin, url in debs]
|
||||
|
||||
http_file(
|
||||
name = "kubernetes_cni",
|
||||
sha256 = "05ab3937bc68562e989dc143362ec4d4275262ba9f359338aed720fc914457a5",
|
||||
@@ -62,7 +56,16 @@ http_file(
|
||||
|
||||
docker_pull(
|
||||
name = "debian-iptables-amd64",
|
||||
digest = "sha256:bc20977ac38abfb43071b4c61c4b7edb30af894c05eb06758dd61d05118d2842", # v7
|
||||
digest = "sha256:bc20977ac38abfb43071b4c61c4b7edb30af894c05eb06758dd61d05118d2842",
|
||||
registry = "gcr.io",
|
||||
repository = "google-containers/debian-iptables-amd64",
|
||||
tag = "v7", # ignored, but kept here for documentation
|
||||
)
|
||||
|
||||
docker_pull(
|
||||
name = "official_busybox",
|
||||
digest = "sha256:be3c11fdba7cfe299214e46edc642e09514dbb9bbefcd0d3836c05a1e0cd0642",
|
||||
registry = "index.docker.io",
|
||||
repository = "library/busybox",
|
||||
tag = "latest", # ignored, but kept here for documentation
|
||||
)
|
||||
|
@@ -49,6 +49,7 @@ package_group(
|
||||
packages = [
|
||||
"//test/e2e",
|
||||
"//test/e2e/framework",
|
||||
"//test/e2e/kubectl",
|
||||
"//test/e2e/workload",
|
||||
"//test/integration/etcd",
|
||||
"//test/integration/framework",
|
||||
@@ -72,7 +73,7 @@ package_group(
|
||||
packages = [
|
||||
"//cmd/kubeadm/app",
|
||||
"//cmd/kubeadm/app/cmd",
|
||||
"//cmd/kubeadm/app/master",
|
||||
"//cmd/kubeadm/app/phases/controlplane",
|
||||
],
|
||||
)
|
||||
|
||||
|
@@ -42,44 +42,43 @@ spec:
|
||||
```
|
||||
|
||||
* time, t=0
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
test-ingress - default-http-backend:80
|
||||
$ kubectl describe ing
|
||||
No events.
|
||||
```
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
test-ingress - default-http-backend:80
|
||||
$ kubectl describe ing
|
||||
No events.
|
||||
```
|
||||
|
||||
* time, t=1m
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
test-ingress - default-http-backend:80 130.211.5.27
|
||||
```console
|
||||
$ kubectl get ing
|
||||
NAME RULE BACKEND ADDRESS
|
||||
test-ingress - default-http-backend:80 130.211.5.27
|
||||
|
||||
$ kubectl describe ing
|
||||
target-proxy: k8s-tp-default-test-ingress
|
||||
url-map: k8s-um-default-test-ingress
|
||||
backends: {"k8s-be-32342":"UNKNOWN"}
|
||||
forwarding-rule: k8s-fw-default-test-ingress
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
||||
───────── ──────── ───── ──── ───────────── ────── ───────
|
||||
46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
|
||||
```
|
||||
$ kubectl describe ing
|
||||
target-proxy: k8s-tp-default-test-ingress
|
||||
url-map: k8s-um-default-test-ingress
|
||||
backends: {"k8s-be-32342":"UNKNOWN"}
|
||||
forwarding-rule: k8s-fw-default-test-ingress
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
||||
───────── ──────── ───── ──── ───────────── ────── ───────
|
||||
46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
|
||||
```
|
||||
|
||||
* time, t=5m
|
||||
```console
|
||||
$ kubectl describe ing
|
||||
target-proxy: k8s-tp-default-test-ingress
|
||||
url-map: k8s-um-default-test-ingress
|
||||
backends: {"k8s-be-32342":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-test-ingress
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
||||
───────── ──────── ───── ──── ───────────── ────── ───────
|
||||
46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
|
||||
|
||||
```
|
||||
```console
|
||||
$ kubectl describe ing
|
||||
target-proxy: k8s-tp-default-test-ingress
|
||||
url-map: k8s-um-default-test-ingress
|
||||
backends: {"k8s-be-32342":"HEALTHY"}
|
||||
forwarding-rule: k8s-fw-default-test-ingress
|
||||
Events:
|
||||
FirstSeen LastSeen Count From SubobjectPath Reason Message
|
||||
───────── ──────── ───── ──── ───────────── ────── ───────
|
||||
46s 46s 1 {loadbalancer-controller } Success Created loadbalancer 130.211.5.27
|
||||
```
|
||||
|
||||
## Disabling GLBC
|
||||
|
||||
@@ -87,20 +86,20 @@ Since GLBC runs as a cluster addon, you cannot simply delete the RC. The easiest
|
||||
|
||||
* IFF you want to tear down existing L7 loadbalancers, hit the /delete-all-and-quit endpoint on the pod:
|
||||
|
||||
```console
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
l7-lb-controller-7bb21 1/1 Running 0 1h
|
||||
$ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system
|
||||
$ kubectl logs l7-lb-controller-7b221 -c l7-lb-controller --follow
|
||||
...
|
||||
I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion.
|
||||
```
|
||||
```console
|
||||
$ kubectl get pods --namespace=kube-system
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
l7-lb-controller-7bb21 1/1 Running 0 1h
|
||||
$ kubectl exec l7-lb-controller-7bb21 -c l7-lb-controller curl http://localhost:8081/delete-all-and-quit --namespace=kube-system
|
||||
$ kubectl logs l7-lb-controller-7b221 -c l7-lb-controller --follow
|
||||
...
|
||||
I1007 00:30:00.322528 1 main.go:160] Handled quit, awaiting pod deletion.
|
||||
```
|
||||
|
||||
* Nullify the RC (but don't delete it or the addon controller will "fix" it for you)
|
||||
```console
|
||||
$ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system
|
||||
```
|
||||
```console
|
||||
$ kubectl scale rc l7-lb-controller --replicas=0 --namespace=kube-system
|
||||
```
|
||||
|
||||
## Limitations
|
||||
|
||||
|
@@ -23,29 +23,29 @@ metadata:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.4.0-beta.0
|
||||
name: heapster-v1.4.0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -65,7 +65,7 @@ spec:
|
||||
- name: usr-ca-certs
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: eventer
|
||||
command:
|
||||
- /eventer
|
||||
@@ -78,7 +78,7 @@ spec:
|
||||
- name: usr-ca-certs
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: heapster-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -102,12 +102,10 @@ spec:
|
||||
- --extra-cpu={{ metrics_cpu_per_node }}m
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{metrics_memory_per_node}}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: eventer-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -131,11 +129,9 @@ spec:
|
||||
- --extra-cpu=0m
|
||||
- --memory={{base_eventer_memory}}
|
||||
- --extra-memory={{eventer_memory_per_node}}Ki
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=eventer
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
volumes:
|
||||
- name: ssl-certs
|
||||
hostPath:
|
||||
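For context on the addon-resizer ("nanny") flags in the hunks above: the nanny resizes the watched container to roughly `base + extra * number_of_nodes`. A rough sketch of that arithmetic, with made-up numbers standing in for the `{{ ... }}` placeholders:

```sh
# Assumed semantics of the nanny flags above: request = base + extra * nodes.
# The concrete numbers are illustrative, not the addon's real defaults.
base_metrics_memory_mi=140      # --memory, in Mi
metrics_memory_per_node_mi=4    # --extra-memory, in Mi per node
num_nodes=100
echo "$(( base_metrics_memory_mi + metrics_memory_per_node_mi * num_nodes ))Mi"   # -> 540Mi
```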
|
@@ -23,29 +23,29 @@ metadata:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.4.0-beta.0
|
||||
name: heapster-v1.4.0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -66,7 +66,7 @@ spec:
|
||||
- name: usr-ca-certs
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: eventer
|
||||
command:
|
||||
- /eventer
|
||||
@@ -79,7 +79,7 @@ spec:
|
||||
- name: usr-ca-certs
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: heapster-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -103,12 +103,10 @@ spec:
|
||||
- --extra-cpu={{ metrics_cpu_per_node }}m
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{ metrics_memory_per_node }}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: eventer-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -132,11 +130,9 @@ spec:
|
||||
- --extra-cpu=0m
|
||||
- --memory={{ base_eventer_memory }}
|
||||
- --extra-memory={{ eventer_memory_per_node }}Ki
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=eventer
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
volumes:
|
||||
- name: ssl-certs
|
||||
hostPath:
|
||||
|
@@ -23,29 +23,29 @@ metadata:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.4.0-beta.0
|
||||
name: heapster-v1.4.0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -58,13 +58,13 @@ spec:
|
||||
- /heapster
|
||||
- --source=kubernetes.summary_api:''
|
||||
- --sink=influxdb:http://monitoring-influxdb:8086
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: eventer
|
||||
command:
|
||||
- /eventer
|
||||
- --source=kubernetes:''
|
||||
- --sink=influxdb:http://monitoring-influxdb:8086
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: heapster-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -88,12 +88,10 @@ spec:
|
||||
- --extra-cpu={{ metrics_cpu_per_node }}m
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{ metrics_memory_per_node }}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: eventer-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -117,11 +115,9 @@ spec:
|
||||
- --extra-cpu=0m
|
||||
- --memory={{ base_eventer_memory }}
|
||||
- --extra-memory={{ eventer_memory_per_node }}Ki
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=eventer
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
serviceAccountName: heapster
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
|
@@ -21,29 +21,29 @@ metadata:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.4.0-beta.0
|
||||
name: heapster-v1.4.0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -55,7 +55,7 @@ spec:
|
||||
command:
|
||||
- /heapster
|
||||
- --source=kubernetes.summary_api:''
|
||||
- --sink=stackdriver
|
||||
- --sink=stackdriver:?cluster_name={{ cluster_name }}
|
||||
# TODO: add --disable_export when it's merged into Heapster release
|
||||
volumeMounts:
|
||||
- name: ssl-certs
|
||||
@@ -64,7 +64,7 @@ spec:
|
||||
- name: usr-ca-certs
|
||||
mountPath: /usr/share/ca-certificates
|
||||
readOnly: true
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: heapster-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -88,11 +88,9 @@ spec:
|
||||
- --extra-cpu={{ metrics_cpu_per_node }}m
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{metrics_memory_per_node}}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
volumes:
|
||||
- name: ssl-certs
|
||||
hostPath:
|
||||
|
@@ -21,29 +21,29 @@ metadata:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: heapster-v1.4.0-beta.0
|
||||
name: heapster-v1.4.0
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: heapster
|
||||
version: v1.4.0-beta.0
|
||||
version: v1.4.0
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
containers:
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0-beta.0
|
||||
- image: gcr.io/google_containers/heapster-amd64:v1.4.0
|
||||
name: heapster
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
@@ -55,7 +55,7 @@ spec:
|
||||
command:
|
||||
- /heapster
|
||||
- --source=kubernetes.summary_api:''
|
||||
- image: gcr.io/google_containers/addon-resizer:1.7
|
||||
- image: gcr.io/google_containers/addon-resizer:2.0
|
||||
name: heapster-nanny
|
||||
resources:
|
||||
limits:
|
||||
@@ -79,11 +79,9 @@ spec:
|
||||
- --extra-cpu={{ metrics_cpu_per_node }}m
|
||||
- --memory={{ base_metrics_memory }}
|
||||
- --extra-memory={{ metrics_memory_per_node }}Mi
|
||||
- --threshold=5
|
||||
- --deployment=heapster-v1.4.0-beta.0
|
||||
- --deployment=heapster-v1.4.0
|
||||
- --container=heapster
|
||||
- --poll-period=300000
|
||||
- --estimator=exponential
|
||||
serviceAccountName: heapster
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
|
@@ -1,20 +1,20 @@
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: fluentd-es-v1.22
|
||||
name: fluentd-es-v1.24
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: fluentd-es
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
version: v1.22
|
||||
version: v1.24
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: fluentd-es
|
||||
kubernetes.io/cluster-service: "true"
|
||||
version: v1.22
|
||||
version: v1.24
|
||||
# This annotation ensures that fluentd does not get evicted if the node
|
||||
# supports critical pod annotation based priority scheme.
|
||||
# Note that this does not guarantee admission on the nodes (#40573).
|
||||
@@ -24,7 +24,7 @@ spec:
|
||||
serviceAccountName: fluentd-es
|
||||
containers:
|
||||
- name: fluentd-es
|
||||
image: gcr.io/google_containers/fluentd-elasticsearch:1.23
|
||||
image: gcr.io/google_containers/fluentd-elasticsearch:1.24
|
||||
command:
|
||||
- '/bin/sh'
|
||||
- '-c'
|
||||
|
@@ -16,7 +16,7 @@
|
||||
|
||||
PREFIX = gcr.io/google_containers
|
||||
IMAGE = fluentd-elasticsearch
|
||||
TAG = 1.23
|
||||
TAG = 1.24
|
||||
|
||||
build:
|
||||
docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .
|
||||
|
@@ -32,6 +32,7 @@ sed -i -e "s/USER=td-agent/USER=root/" -e "s/GROUP=td-agent/GROUP=root/" /etc/in
|
||||
# http://docs.fluentd.org/articles/plugin-management
|
||||
td-agent-gem install --no-document fluent-plugin-kubernetes_metadata_filter -v 0.27.0
|
||||
td-agent-gem install --no-document fluent-plugin-elasticsearch -v 1.9.5
|
||||
td-agent-gem install --no-document fluent-plugin-prometheus -v 0.3.0
|
||||
|
||||
# Remove docs and postgres references
|
||||
rm -rf /opt/td-agent/embedded/share/doc \
|
||||
|
@@ -283,6 +283,44 @@
|
||||
type kubernetes_metadata
|
||||
</filter>
|
||||
|
||||
# Prometheus Exporter Plugin
|
||||
# input plugin that exports metrics
|
||||
<source>
|
||||
type prometheus
|
||||
</source>
|
||||
|
||||
<source>
|
||||
type monitor_agent
|
||||
</source>
|
||||
|
||||
<source>
|
||||
type forward
|
||||
</source>
|
||||
|
||||
# input plugin that collects metrics from MonitorAgent
|
||||
<source>
|
||||
@type prometheus_monitor
|
||||
<labels>
|
||||
host ${hostname}
|
||||
</labels>
|
||||
</source>
|
||||
|
||||
# input plugin that collects metrics for output plugin
|
||||
<source>
|
||||
@type prometheus_output_monitor
|
||||
<labels>
|
||||
host ${hostname}
|
||||
</labels>
|
||||
</source>
|
||||
|
||||
# input plugin that collects metrics for in_tail plugin
|
||||
<source>
|
||||
@type prometheus_tail_monitor
|
||||
<labels>
|
||||
host ${hostname}
|
||||
</labels>
|
||||
</source>
|
||||
|
||||
<match **>
|
||||
type elasticsearch
|
||||
log_level info
|
||||
|
@@ -46,7 +46,7 @@ spec:
|
||||
containers:
|
||||
# TODO: Add resources in 1.8
|
||||
- name: event-exporter
|
||||
image: gcr.io/google-containers/event-exporter:v0.1.0-r2
|
||||
image: gcr.io/google-containers/event-exporter:v0.1.4
|
||||
command:
|
||||
- '/event-exporter'
|
||||
- name: prometheus-to-sd-exporter
|
||||
|
@@ -70,27 +70,14 @@ data:
|
||||
|
||||
# Detect exceptions in the log output and forward them as one log entry.
|
||||
<match raw.kubernetes.**>
|
||||
@type copy
|
||||
@type detect_exceptions
|
||||
|
||||
<store>
|
||||
@type prometheus
|
||||
|
||||
<metric>
|
||||
type counter
|
||||
name logging_line_count
|
||||
desc Total number of lines generated by application containers
|
||||
</metric>
|
||||
</store>
|
||||
<store>
|
||||
@type detect_exceptions
|
||||
|
||||
remove_tag_prefix raw
|
||||
message log
|
||||
stream stream
|
||||
multiline_flush_interval 5
|
||||
max_bytes 500000
|
||||
max_lines 1000
|
||||
</store>
|
||||
remove_tag_prefix raw
|
||||
message log
|
||||
stream stream
|
||||
multiline_flush_interval 5
|
||||
max_bytes 500000
|
||||
max_lines 1000
|
||||
</match>
|
||||
system.input.conf: |-
|
||||
# Example:
|
||||
@@ -291,6 +278,14 @@ data:
|
||||
read_from_head true
|
||||
tag kubelet
|
||||
</source>
|
||||
|
||||
<source>
|
||||
type systemd
|
||||
filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
|
||||
pos_file /var/log/gcp-journald-node-problem-detector.pos
|
||||
read_from_head true
|
||||
tag node-problem-detector
|
||||
</source>
|
||||
monitoring.conf: |-
|
||||
# Prometheus monitoring
|
||||
<source>
|
||||
@@ -342,77 +337,50 @@ data:
|
||||
# compute.googleapis.com service rather than container.googleapis.com to keep
|
||||
# them separate since most users don't care about the node logs.
|
||||
<match kubernetes.**>
|
||||
@type copy
|
||||
@type google_cloud
|
||||
|
||||
<store>
|
||||
@type google_cloud
|
||||
|
||||
# Set the buffer type to file to improve the reliability and reduce the memory consumption
|
||||
buffer_type file
|
||||
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
|
||||
# Set queue_full action to block because we want to pause gracefully
|
||||
# in case of the off-the-limits load instead of throwing an exception
|
||||
buffer_queue_full_action block
|
||||
# Set the chunk limit conservatively to avoid exceeding the GCL limit
|
||||
# of 10MiB per write request.
|
||||
buffer_chunk_limit 2M
|
||||
# Cap the combined memory usage of this buffer and the one below to
|
||||
# 2MiB/chunk * (6 + 2) chunks = 16 MiB
|
||||
buffer_queue_limit 6
|
||||
# Never wait more than 5 seconds before flushing logs in the non-error case.
|
||||
flush_interval 5s
|
||||
# Never wait longer than 30 seconds between retries.
|
||||
max_retry_wait 30
|
||||
# Disable the limit on the number of retries (retry forever).
|
||||
disable_retry_limit
|
||||
# Use multiple threads for processing.
|
||||
num_threads 2
|
||||
</store>
|
||||
<store>
|
||||
@type prometheus
|
||||
|
||||
<metric>
|
||||
type counter
|
||||
name logging_entry_count
|
||||
desc Total number of log entries generated by either application containers or system components
|
||||
<labels>
|
||||
component container
|
||||
</labels>
|
||||
</metric>
|
||||
</store>
|
||||
# Collect metrics in Prometheus registry about plugin activity.
|
||||
enable_monitoring true
|
||||
monitoring_type prometheus
|
||||
# Set the buffer type to file to improve the reliability and reduce the memory consumption
|
||||
buffer_type file
|
||||
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
|
||||
# Set queue_full action to block because we want to pause gracefully
|
||||
# in case of the off-the-limits load instead of throwing an exception
|
||||
buffer_queue_full_action block
|
||||
# Set the chunk limit conservatively to avoid exceeding the GCL limit
|
||||
# of 10MiB per write request.
|
||||
buffer_chunk_limit 2M
|
||||
# Cap the combined memory usage of this buffer and the one below to
|
||||
# 2MiB/chunk * (6 + 2) chunks = 16 MiB
|
||||
buffer_queue_limit 6
|
||||
# Never wait more than 5 seconds before flushing logs in the non-error case.
|
||||
flush_interval 5s
|
||||
# Never wait longer than 30 seconds between retries.
|
||||
max_retry_wait 30
|
||||
# Disable the limit on the number of retries (retry forever).
|
||||
disable_retry_limit
|
||||
# Use multiple threads for processing.
|
||||
num_threads 2
|
||||
</match>
|
||||
|
||||
# Keep a smaller buffer here since these logs are less important than the user's
|
||||
# container logs.
|
||||
<match **>
|
||||
@type copy
|
||||
@type google_cloud
|
||||
|
||||
<store>
|
||||
@type google_cloud
|
||||
|
||||
detect_subservice false
|
||||
buffer_type file
|
||||
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
|
||||
buffer_queue_full_action block
|
||||
buffer_chunk_limit 2M
|
||||
buffer_queue_limit 2
|
||||
flush_interval 5s
|
||||
max_retry_wait 30
|
||||
disable_retry_limit
|
||||
num_threads 2
|
||||
</store>
|
||||
<store>
|
||||
@type prometheus
|
||||
|
||||
<metric>
|
||||
type counter
|
||||
name logging_entry_count
|
||||
desc Total number of log entries generated by either application containers or system components
|
||||
<labels>
|
||||
component system
|
||||
</labels>
|
||||
</metric>
|
||||
</store>
|
||||
enable_monitoring true
|
||||
monitoring_type prometheus
|
||||
detect_subservice false
|
||||
buffer_type file
|
||||
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
|
||||
buffer_queue_full_action block
|
||||
buffer_chunk_limit 2M
|
||||
buffer_queue_limit 2
|
||||
flush_interval 5s
|
||||
max_retry_wait 30
|
||||
disable_retry_limit
|
||||
num_threads 2
|
||||
</match>
|
||||
metadata:
|
||||
name: fluentd-gcp-config-v1.1
|
||||
|
@@ -27,7 +27,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: fluentd-gcp
|
||||
image: gcr.io/google-containers/fluentd-gcp:2.0.7
|
||||
image: gcr.io/google-containers/fluentd-gcp:2.0.8
|
||||
# If fluentd consumes its own logs, the following situation may happen:
|
||||
# fluentd fails to send a chunk to the server => writes it to the log =>
|
||||
# tries to send this message to the server => fails to send a chunk and so on.
|
||||
@@ -90,13 +90,13 @@ spec:
|
||||
exit 1;
|
||||
fi;
|
||||
- name: prometheus-to-sd-exporter
|
||||
image: gcr.io/google-containers/prometheus-to-sd:v0.1.0
|
||||
image: gcr.io/google-containers/prometheus-to-sd:v0.1.3
|
||||
command:
|
||||
- /monitor
|
||||
- --component=fluentd
|
||||
- --target-port=31337
|
||||
- --stackdriver-prefix=container.googleapis.com/internal/addons
|
||||
- --whitelisted-metrics=logging_line_count,logging_entry_count
|
||||
- --whitelisted-metrics=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
|
||||
volumeMounts:
|
||||
- name: ssl-certs
|
||||
mountPath: /etc/ssl/certs
|
||||
@@ -107,6 +107,9 @@ spec:
|
||||
effect: "NoSchedule"
|
||||
- operator: "Exists"
|
||||
effect: "NoExecute"
|
||||
#TODO: remove this toleration once #44445 is properly fixed.
|
||||
- operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
terminationGracePeriodSeconds: 30
|
||||
volumes:
|
||||
- name: varlog
|
||||
|
@@ -1,6 +1,6 @@
|
||||
# Collecting Docker Log Files with Fluentd and sending to GCP.
|
||||
|
||||
The image was moved to the the
|
||||
The image was moved to the
|
||||
[new location](https://github.com/kubernetes/contrib/tree/master/fluentd/fluentd-gcp-image).
|
||||
|
||||
[]()
|
||||
|
@@ -80,3 +80,5 @@ spec:
|
||||
tolerations:
|
||||
- operator: "Exists"
|
||||
effect: "NoExecute"
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
|
@@ -605,6 +605,7 @@ function build-kube-env {
|
||||
|
||||
rm -f ${file}
|
||||
cat >$file <<EOF
|
||||
CLUSTER_NAME: $(yaml-quote ${CLUSTER_NAME})
|
||||
ENV_TIMESTAMP: $(yaml-quote $(date -u +%Y-%m-%dT%T%z))
|
||||
INSTANCE_PREFIX: $(yaml-quote ${INSTANCE_PREFIX})
|
||||
NODE_INSTANCE_PREFIX: $(yaml-quote ${NODE_INSTANCE_PREFIX})
|
||||
@@ -664,7 +665,9 @@ ENABLE_DEFAULT_STORAGE_CLASS: $(yaml-quote ${ENABLE_DEFAULT_STORAGE_CLASS:-})
|
||||
ENABLE_APISERVER_BASIC_AUDIT: $(yaml-quote ${ENABLE_APISERVER_BASIC_AUDIT:-})
|
||||
ENABLE_APISERVER_ADVANCED_AUDIT: $(yaml-quote ${ENABLE_APISERVER_ADVANCED_AUDIT:-})
|
||||
ENABLE_CACHE_MUTATION_DETECTOR: $(yaml-quote ${ENABLE_CACHE_MUTATION_DETECTOR:-false})
|
||||
ENABLE_PATCH_CONVERSION_DETECTOR: $(yaml-quote ${ENABLE_PATCH_CONVERSION_DETECTOR:-false})
|
||||
ADVANCED_AUDIT_BACKEND: $(yaml-quote ${ADVANCED_AUDIT_BACKEND:-log})
|
||||
GCE_API_ENDPOINT: $(yaml-quote ${GCE_API_ENDPOINT:-})
|
||||
EOF
|
||||
if [ -n "${KUBELET_PORT:-}" ]; then
|
||||
cat >>$file <<EOF
|
||||
@@ -741,6 +744,12 @@ EOF
|
||||
if [ -n "${FEATURE_GATES:-}" ]; then
|
||||
cat >>$file <<EOF
|
||||
FEATURE_GATES: $(yaml-quote ${FEATURE_GATES})
|
||||
EOF
|
||||
fi
|
||||
if [[ "${master}" == "true" && "${MASTER_OS_DISTRIBUTION}" == "gci" ]] ||
|
||||
[[ "${master}" == "false" && "${NODE_OS_DISTRIBUTION}" == "gci" ]]; then
|
||||
cat >>$file <<EOF
|
||||
VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume})
|
||||
EOF
|
||||
fi
|
||||
|
||||
|
@@ -33,6 +33,9 @@ function get-master-size {
|
||||
if [[ "${NUM_NODES}" -gt "500" ]]; then
|
||||
suggested_master_size=32
|
||||
fi
|
||||
if [[ "${NUM_NODES}" -gt "3000" ]]; then
|
||||
suggested_master_size=64
|
||||
fi
|
||||
echo "${suggested_master_size}"
|
||||
}
|
||||
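A quick worked example of the master-sizing heuristic above; the hunk only shows the two added thresholds, so the starting value of 1 below is a placeholder assumption:

```sh
# Hypothetical 1000-node cluster; the initial size of 1 is a stand-in value.
NUM_NODES=1000
suggested_master_size=1
if [[ "${NUM_NODES}" -gt "500" ]]; then suggested_master_size=32; fi
if [[ "${NUM_NODES}" -gt "3000" ]]; then suggested_master_size=64; fi
echo "${suggested_master_size}"   # -> 32 (used to pick the master machine type)
```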
|
||||
|
@@ -19,6 +19,9 @@
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
|
||||
|
||||
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
|
||||
# This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/
|
||||
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
|
||||
GCLOUD=gcloud
|
||||
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
|
||||
REGION=${ZONE%-*}
|
||||
@@ -42,7 +45,7 @@ KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
|
||||
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
|
||||
|
||||
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
|
||||
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-debian}}
|
||||
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
|
||||
if [[ "${MASTER_OS_DISTRIBUTION}" == "coreos" ]]; then
|
||||
MASTER_OS_DISTRIBUTION="container-linux"
|
||||
fi
|
||||
@@ -63,15 +66,16 @@ if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
|
||||
NODE_ACCELERATORS=""
|
||||
fi
|
||||
|
||||
# By default a cluster will be started with the master on GCI and nodes on
|
||||
# containervm. If you are updating the containervm version, update this
|
||||
# variable. Also please update corresponding image for node e2e at:
|
||||
# By default a cluster will be started with the master and nodes
|
||||
# on Container-optimized OS (cos, previously known as gci). If
|
||||
# you are updating the os image versions, update this variable.
|
||||
# Also please update corresponding image for node e2e at:
|
||||
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
|
||||
CVM_VERSION=${CVM_VERSION:-container-vm-v20170214}
|
||||
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
|
||||
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-59-9460-64-0}
|
||||
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
|
||||
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
|
||||
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${CVM_VERSION}}
|
||||
NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
|
||||
NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
|
||||
CONTAINER_RUNTIME=${KUBE_CONTAINER_RUNTIME:-docker}
|
||||
RKT_VERSION=${KUBE_RKT_VERSION:-1.23.0}
|
||||
@@ -118,6 +122,10 @@ ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
|
||||
# standalone - Heapster only. Metrics available via Heapster REST API.
|
||||
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
|
||||
|
||||
# One special node out of NUM_NODES would be created of this type if specified.
|
||||
# Useful for scheduling heapster in large clusters with nodes of small size.
|
||||
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
|
||||
|
||||
# Historically fluentd was a manifest pod and then was migrated to DaemonSet.
|
||||
# To avoid situation during cluster upgrade when there are two instances
|
||||
# of fluentd running on a node, kubelet need to mark node on which
|
||||
@@ -125,7 +133,7 @@ ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
|
||||
# TODO(piosz): remove this in 1.8
|
||||
NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
|
||||
|
||||
# To avoid running Calico on a node that is not configured appropriately,
|
||||
# To avoid running Calico on a node that is not configured appropriately,
|
||||
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
|
||||
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
|
||||
NODE_LABELS="${NODE_LABELS},projectcalico.org/ds-ready=true"
|
||||
|
@@ -19,6 +19,9 @@
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
|
||||
|
||||
# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
|
||||
# This endpoint has to be pointing to v1 api. For example, https://www.googleapis.com/compute/staging_v1/
|
||||
GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
|
||||
GCLOUD=gcloud
|
||||
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
|
||||
REGION=${ZONE%-*}
|
||||
@@ -62,11 +65,13 @@ if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
|
||||
NODE_ACCELERATORS=""
|
||||
fi
|
||||
|
||||
# By default a cluster will be started with the master on GCI and nodes on
|
||||
# containervm. If you are updating the containervm version, update this
|
||||
# variable. Also please update corresponding image for node e2e at:
|
||||
# By default a cluster will be started with the master and nodes
|
||||
# on Container-VM, the deprecated OS. Some tests assume container-VM,
|
||||
# and only when that is fixed can we use Container-Optimized OS
|
||||
# (cos, gci) as we do in config-default.sh.
|
||||
# Also please update corresponding image for node e2e at:
|
||||
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
|
||||
CVM_VERSION=${CVM_VERSION:-container-vm-v20170214}
|
||||
CVM_VERSION=${CVM_VERSION:-container-vm-v20170627}
|
||||
GCI_VERSION=${KUBE_GCI_VERSION:-cos-stable-59-9460-64-0}
|
||||
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
|
||||
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
|
||||
@@ -128,6 +133,10 @@ ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"
|
||||
# standalone - Heapster only. Metrics available via Heapster REST API.
|
||||
ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
|
||||
|
||||
# One special node out of NUM_NODES would be created of this type if specified.
|
||||
# Useful for scheduling heapster in large clusters with nodes of small size.
|
||||
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"
|
||||
|
||||
# Set etcd image (e.g. 3.0.17-alpha.1) and version (e.g. 3.0.17) if you need
|
||||
# non-default version.
|
||||
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
|
||||
|
@@ -224,6 +224,11 @@ function create-master-auth {
|
||||
cat <<EOF >/etc/gce.conf
|
||||
[global]
|
||||
EOF
|
||||
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
api-endpoint = ${GCE_API_ENDPOINT}
|
||||
EOF
|
||||
fi
|
||||
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
|
||||
use_cloud_config="true"
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
@@ -232,6 +237,11 @@ token-body = ${TOKEN_BODY}
|
||||
project-id = ${PROJECT_ID}
|
||||
network-name = ${NODE_NETWORK}
|
||||
EOF
|
||||
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
network-project-id = ${NETWORK_PROJECT_ID}
|
||||
EOF
|
||||
fi
|
||||
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
subnetwork-name = ${NODE_SUBNETWORK}
|
||||
@@ -983,7 +993,16 @@ function start-kube-apiserver {

  local container_env=""
  if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
    container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
    container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
  fi
  if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
    if [[ -n "${container_env}" ]]; then
      container_env="${container_env}, "
    fi
    container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\""
  fi
  if [[ -n "${container_env}" ]]; then
    container_env="\"env\":[{${container_env}}],"
  fi

  src_file="${src_dir}/kube-apiserver.manifest"
|
||||
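The string assembly in the `start-kube-apiserver` hunk above is easier to follow with a concrete trace. A minimal sketch, assuming only the cache-mutation detector is enabled (value made up); the resulting fragment is what gets spliced into the kube-apiserver manifest's `env` field:

```sh
# Trace of the logic above with ENABLE_CACHE_MUTATION_DETECTOR=true and the
# patch-conversion detector left unset.
ENABLE_CACHE_MUTATION_DETECTOR="true"
container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
container_env="\"env\":[{${container_env}}],"
echo "${container_env}"
# -> "env":[{"name": "KUBE_CACHE_MUTATION_DETECTOR", "value": "true"}],
```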
@@ -1168,6 +1187,8 @@ function setup-addon-manifests {
|
||||
}
|
||||
|
||||
# Prepares the manifests of k8s addons, and starts the addon manager.
|
||||
# Vars assumed:
|
||||
# CLUSTER_NAME
|
||||
function start-kube-addons {
|
||||
echo "Prepare kube-addons manifests and start kube addon manager"
|
||||
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
|
||||
@@ -1205,6 +1226,7 @@ function start-kube-addons {
|
||||
controller_yaml="${controller_yaml}/heapster-controller.yaml"
|
||||
fi
|
||||
remove-salt-config-comments "${controller_yaml}"
|
||||
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
|
||||
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
|
||||
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
|
||||
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
|
||||
|
@@ -17,14 +17,19 @@
|
||||
# A library of helper functions and constant for the Container Linux distro.
|
||||
source "${KUBE_ROOT}/cluster/gce/container-linux/helper.sh"
|
||||
|
||||
function get-node-instance-metadata {
|
||||
local metadata=""
|
||||
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
|
||||
metadata+="user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml,"
|
||||
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh,"
|
||||
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
echo "${metadata}"
|
||||
}
|
||||
|
||||
# $1: template name (required).
|
||||
function create-node-instance-template {
|
||||
local template_name="$1"
|
||||
|
||||
create-node-template "$template_name" "${scope_flags[*]}" \
|
||||
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
|
||||
"user-data=${KUBE_ROOT}/cluster/gce/container-linux/node.yaml" \
|
||||
"configure-sh=${KUBE_ROOT}/cluster/gce/container-linux/configure.sh" \
|
||||
"cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
|
||||
# TODO(euank): We should include update-strategy here. We should also switch to ignition
|
||||
}
|
||||
|
@@ -16,12 +16,17 @@
|
||||
|
||||
# A library of helper functions and constant for debian os distro
|
||||
|
||||
function get-node-instance-metadata {
|
||||
local metadata=""
|
||||
metadata+="startup-script=${KUBE_TEMP}/configure-vm.sh,"
|
||||
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
|
||||
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
echo "${metadata}"
|
||||
}
|
||||
|
||||
# $1: template name (required)
|
||||
function create-node-instance-template {
|
||||
local template_name="$1"
|
||||
prepare-startup-script
|
||||
create-node-template "$template_name" "${scope_flags}" \
|
||||
"startup-script=${KUBE_TEMP}/configure-vm.sh" \
|
||||
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
|
||||
"cluster-name=${KUBE_TEMP}/cluster-name.txt"
|
||||
create-node-template "$template_name" "${scope_flags}" "$(get-node-instance-metadata)"
|
||||
}
|
||||
|
@@ -379,6 +379,11 @@ function create-master-auth {
|
||||
cat <<EOF >/etc/gce.conf
|
||||
[global]
|
||||
EOF
|
||||
if [[ -n "${GCE_API_ENDPOINT:-}" ]]; then
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
api-endpoint = ${GCE_API_ENDPOINT}
|
||||
EOF
|
||||
fi
|
||||
if [[ -n "${PROJECT_ID:-}" && -n "${TOKEN_URL:-}" && -n "${TOKEN_BODY:-}" && -n "${NODE_NETWORK:-}" ]]; then
|
||||
use_cloud_config="true"
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
@@ -387,6 +392,11 @@ token-body = ${TOKEN_BODY}
|
||||
project-id = ${PROJECT_ID}
|
||||
network-name = ${NODE_NETWORK}
|
||||
EOF
|
||||
if [[ -n "${NETWORK_PROJECT_ID:-}" ]]; then
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
network-project-id = ${NETWORK_PROJECT_ID}
|
||||
EOF
|
||||
fi
|
||||
if [[ -n "${NODE_SUBNETWORK:-}" ]]; then
|
||||
cat <<EOF >>/etc/gce.conf
|
||||
subnetwork-name = ${NODE_SUBNETWORK}
|
||||
@@ -912,7 +922,11 @@ function start-kubelet {
|
||||
flags+=" --cni-bin-dir=/home/kubernetes/bin"
|
||||
if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
|
||||
# Calico uses CNI always.
|
||||
flags+=" --network-plugin=cni"
|
||||
if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" ]]; then
|
||||
flags+=" --network-plugin=${NETWORK_PROVIDER}"
|
||||
else
|
||||
flags+=" --network-plugin=cni"
|
||||
fi
|
||||
else
|
||||
# Otherwise use the configured value.
|
||||
flags+=" --network-plugin=${NETWORK_PROVIDER}"
|
||||
@@ -1182,6 +1196,7 @@ function prepare-mounter-rootfs {
|
||||
mount --make-rshared "${CONTAINERIZED_MOUNTER_ROOTFS}/var/lib/kubelet"
|
||||
mount --bind -o ro /proc "${CONTAINERIZED_MOUNTER_ROOTFS}/proc"
|
||||
mount --bind -o ro /dev "${CONTAINERIZED_MOUNTER_ROOTFS}/dev"
|
||||
mount --bind -o ro /etc/resolv.conf "${CONTAINERIZED_MOUNTER_ROOTFS}/etc/resolv.conf"
|
||||
}
|
||||
|
||||
# A helper function for removing salt configuration and comments from a file.
|
||||
@@ -1398,7 +1413,16 @@ function start-kube-apiserver {
|
||||
|
||||
local container_env=""
|
||||
if [[ -n "${ENABLE_CACHE_MUTATION_DETECTOR:-}" ]]; then
|
||||
container_env="\"env\":[{\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\"}],"
|
||||
container_env="\"name\": \"KUBE_CACHE_MUTATION_DETECTOR\", \"value\": \"${ENABLE_CACHE_MUTATION_DETECTOR}\""
|
||||
fi
|
||||
if [[ -n "${ENABLE_PATCH_CONVERSION_DETECTOR:-}" ]]; then
|
||||
if [[ -n "${container_env}" ]]; then
|
||||
container_env="${container_env}, "
|
||||
fi
|
||||
container_env="\"name\": \"KUBE_PATCH_CONVERSION_DETECTOR\", \"value\": \"${ENABLE_PATCH_CONVERSION_DETECTOR}\""
|
||||
fi
|
||||
if [[ -n "${container_env}" ]]; then
|
||||
container_env="\"env\":[{${container_env}}],"
|
||||
fi
|
||||
|
||||
if [[ -n "${ENCRYPTION_PROVIDER_CONFIG:-}" ]]; then
|
||||
@@ -1597,6 +1621,8 @@ function setup-addon-manifests {
|
||||
}
|
||||
|
||||
# Prepares the manifests of k8s addons, and starts the addon manager.
|
||||
# Vars assumed:
|
||||
# CLUSTER_NAME
|
||||
function start-kube-addons {
|
||||
echo "Prepare kube-addons manifests and start kube addon manager"
|
||||
local -r src_dir="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty"
|
||||
@@ -1634,6 +1660,7 @@ function start-kube-addons {
|
||||
controller_yaml="${controller_yaml}/heapster-controller.yaml"
|
||||
fi
|
||||
remove-salt-config-comments "${controller_yaml}"
|
||||
sed -i -e "s@{{ cluster_name }}@${CLUSTER_NAME}@g" "${controller_yaml}"
|
||||
sed -i -e "s@{{ *base_metrics_memory *}}@${base_metrics_memory}@g" "${controller_yaml}"
|
||||
sed -i -e "s@{{ *base_metrics_cpu *}}@${base_metrics_cpu}@g" "${controller_yaml}"
|
||||
sed -i -e "s@{{ *base_eventer_memory *}}@${base_eventer_memory}@g" "${controller_yaml}"
|
||||
@@ -1701,7 +1728,7 @@ function start-kube-addons {
|
||||
sed -i -e "s@__CALICO_TYPHA_CPU__@$(get-calico-typha-cpu)@g" "${typha_dep_file}"
|
||||
sed -i -e "s@__CALICO_TYPHA_REPLICAS__@$(get-calico-typha-replicas)@g" "${typha_dep_file}"
|
||||
else
|
||||
# If not configured to use Calico, then set the typha replica count to 0, but only if the
|
||||
# addon is present.
|
||||
local -r typha_dep_file="${dst_dir}/calico-policy-controller/typha-deployment.yaml"
|
||||
if [[ -e $typha_dep_file ]]; then
|
||||
|
@@ -17,16 +17,21 @@
|
||||
# A library of helper functions and constant for GCI distro
|
||||
source "${KUBE_ROOT}/cluster/gce/gci/helper.sh"
|
||||
|
||||
function get-node-instance-metadata {
|
||||
local metadata=""
|
||||
metadata+="kube-env=${KUBE_TEMP}/node-kube-env.yaml,"
|
||||
metadata+="user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml,"
|
||||
metadata+="configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh,"
|
||||
metadata+="cluster-name=${KUBE_TEMP}/cluster-name.txt,"
|
||||
metadata+="gci-update-strategy=${KUBE_TEMP}/gci-update.txt,"
|
||||
metadata+="gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt,"
|
||||
metadata+="gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
|
||||
echo "${metadata}"
|
||||
}
|
||||
|
||||
# $1: template name (required).
|
||||
function create-node-instance-template {
|
||||
local template_name="$1"
|
||||
ensure-gci-metadata-files
|
||||
create-node-template "$template_name" "${scope_flags[*]}" \
|
||||
"kube-env=${KUBE_TEMP}/node-kube-env.yaml" \
|
||||
"user-data=${KUBE_ROOT}/cluster/gce/gci/node.yaml" \
|
||||
"configure-sh=${KUBE_ROOT}/cluster/gce/gci/configure.sh" \
|
||||
"cluster-name=${KUBE_TEMP}/cluster-name.txt" \
|
||||
"gci-update-strategy=${KUBE_TEMP}/gci-update.txt" \
|
||||
"gci-ensure-gke-docker=${KUBE_TEMP}/gci-ensure-gke-docker.txt" \
|
||||
"gci-docker-version=${KUBE_TEMP}/gci-docker-version.txt"
|
||||
create-node-template "$template_name" "${scope_flags[*]}" "$(get-node-instance-metadata)"
|
||||
}
|
||||
|
@@ -345,6 +345,11 @@ function detect-node-names() {
|
||||
--format='value(instance)'))
|
||||
done
|
||||
fi
|
||||
# Add heapster node name to the list too (if it exists).
|
||||
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
||||
NODE_NAMES+=("${NODE_INSTANCE_PREFIX}-heapster")
|
||||
fi
|
||||
|
||||
echo "INSTANCE_GROUPS=${INSTANCE_GROUPS[*]:-}" >&2
|
||||
echo "NODE_NAMES=${NODE_NAMES[*]:-}" >&2
|
||||
}
|
||||
@@ -510,7 +515,7 @@ function make-gcloud-network-argument() {
|
||||
ret="${ret},aliases=pods-default:${alias_size}"
|
||||
ret="${ret} --no-can-ip-forward"
|
||||
else
|
||||
if [[ ${PREEXISTING_NETWORK} = "true" && "${PREEXISTING_NETWORK_MODE}" != "custom" ]]; then
|
||||
if [[ ${ENABLE_BIG_CLUSTER_SUBNETS} != "true" || (${PREEXISTING_NETWORK} = "true" && "${PREEXISTING_NETWORK_MODE}" != "custom") ]]; then
|
||||
ret="--network ${network}"
|
||||
else
|
||||
ret="--subnet=${network}"
|
||||
@@ -533,7 +538,7 @@ function get-template-name-from-version() {
|
||||
# Robustly try to create an instance template.
|
||||
# $1: The name of the instance template.
|
||||
# $2: The scopes flag.
|
||||
# $3 and others: Metadata entries (must all be from a file).
|
||||
# $3: String of comma-separated metadata entries (must all be from a file).
|
||||
function create-node-template() {
|
||||
detect-project
|
||||
local template_name="$1"
|
||||
@@ -600,7 +605,7 @@ function create-node-template() {
|
||||
${network} \
|
||||
${preemptible_minions} \
|
||||
$2 \
|
||||
--metadata-from-file $(echo ${@:3} | tr ' ' ',') >&2; then
|
||||
--metadata-from-file $3 >&2; then
|
||||
if (( attempt > 5 )); then
|
||||
echo -e "${color_red}Failed to create instance template $template_name ${color_norm}" >&2
|
||||
exit 2
|
||||
@@ -1237,21 +1242,24 @@ function create-nodes-firewall() {
|
||||
}
|
||||
}
|
||||
|
||||
function create-nodes-template() {
|
||||
echo "Creating minions."
|
||||
|
||||
# TODO(zmerlynn): Refactor setting scope flags.
|
||||
function get-scope-flags() {
|
||||
local scope_flags=
|
||||
if [[ -n "${NODE_SCOPES}" ]]; then
|
||||
scope_flags="--scopes ${NODE_SCOPES}"
|
||||
else
|
||||
scope_flags="--no-scopes"
|
||||
fi
|
||||
echo "${scope_flags}"
|
||||
}
|
||||
|
||||
function create-nodes-template() {
|
||||
echo "Creating nodes."
|
||||
|
||||
local scope_flags=$(get-scope-flags)
|
||||
|
||||
write-node-env
|
||||
|
||||
local template_name="${NODE_INSTANCE_PREFIX}-template"
|
||||
|
||||
create-node-instance-template $template_name
|
||||
}
|
||||
|
||||
@@ -1279,7 +1287,13 @@ function set_num_migs() {
|
||||
function create-nodes() {
|
||||
local template_name="${NODE_INSTANCE_PREFIX}-template"
|
||||
|
||||
local instances_left=${NUM_NODES}
|
||||
if [[ -z "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
||||
local -r nodes="${NUM_NODES}"
|
||||
else
|
||||
local -r nodes=$(( NUM_NODES - 1 ))
|
||||
fi
|
||||
|
||||
local instances_left=${nodes}
|
||||
|
||||
#TODO: parallelize this loop to speed up the process
|
||||
for ((i=1; i<=${NUM_MIGS}; i++)); do
|
||||
@@ -1305,6 +1319,47 @@ function create-nodes() {
|
||||
--zone "${ZONE}" \
|
||||
--project "${PROJECT}" || true;
|
||||
done
|
||||
|
||||
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
||||
echo "Creating a special node for heapster with machine-type ${HEAPSTER_MACHINE_TYPE}"
|
||||
create-heapster-node
|
||||
fi
|
||||
}
|
||||
|
||||
# Assumes:
|
||||
# - NODE_INSTANCE_PREFIX
|
||||
# - PROJECT
|
||||
# - ZONE
|
||||
# - HEAPSTER_MACHINE_TYPE
|
||||
# - NODE_DISK_TYPE
|
||||
# - NODE_DISK_SIZE
|
||||
# - NODE_IMAGE_PROJECT
|
||||
# - NODE_IMAGE
|
||||
# - NODE_TAG
|
||||
# - NETWORK
|
||||
# - ENABLE_IP_ALIASES
|
||||
# - IP_ALIAS_SUBNETWORK
|
||||
# - IP_ALIAS_SIZE
|
||||
function create-heapster-node() {
|
||||
local network=$(make-gcloud-network-argument \
|
||||
"${NETWORK}" "" \
|
||||
"${ENABLE_IP_ALIASES:-}" \
|
||||
"${IP_ALIAS_SUBNETWORK:-}" \
|
||||
"${IP_ALIAS_SIZE:-}")
|
||||
|
||||
gcloud compute instances \
|
||||
create "${NODE_INSTANCE_PREFIX}-heapster" \
|
||||
--project "${PROJECT}" \
|
||||
--zone "${ZONE}" \
|
||||
--machine-type="${HEAPSTER_MACHINE_TYPE}" \
|
||||
--boot-disk-type "${NODE_DISK_TYPE}" \
|
||||
--boot-disk-size "${NODE_DISK_SIZE}" \
|
||||
--image-project="${NODE_IMAGE_PROJECT}" \
|
||||
--image "${NODE_IMAGE}" \
|
||||
--tags "${NODE_TAG}" \
|
||||
${network} \
|
||||
$(get-scope-flags) \
|
||||
--metadata-from-file "$(get-node-instance-metadata)"
|
||||
}
|
||||
|
||||
# Assumes:
|
||||
@@ -1505,6 +1560,20 @@ function kube-down() {
|
||||
"${template}"
|
||||
fi
|
||||
done
|
||||
|
||||
# Delete the special heapster node (if it exists).
|
||||
if [[ -n "${HEAPSTER_MACHINE_TYPE:-}" ]]; then
|
||||
local -r heapster_machine_name="${NODE_INSTANCE_PREFIX}-heapster"
|
||||
if gcloud compute instances describe "${heapster_machine_name}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
|
||||
# Now we can safely delete the VM.
|
||||
gcloud compute instances delete \
|
||||
--project "${PROJECT}" \
|
||||
--quiet \
|
||||
--delete-disks all \
|
||||
--zone "${ZONE}" \
|
||||
"${heapster_machine_name}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
local -r REPLICA_NAME="${KUBE_REPLICA_NAME:-$(get-replica-name)}"
|
||||
@@ -1875,13 +1944,7 @@ function prepare-push() {
|
||||
if [[ "${node}" == "true" ]]; then
|
||||
write-node-env
|
||||
|
||||
# TODO(zmerlynn): Refactor setting scope flags.
|
||||
local scope_flags=
|
||||
if [[ -n "${NODE_SCOPES}" ]]; then
|
||||
scope_flags="--scopes ${NODE_SCOPES}"
|
||||
else
|
||||
scope_flags="--no-scopes"
|
||||
fi
|
||||
local scope_flags=$(get-scope-flags)
|
||||
|
||||
# Ugly hack: Since it is not possible to delete instance-template that is currently
|
||||
# being used, create a temp one, then delete the old one and recreate it once again.
|
||||
|
@@ -29,6 +29,7 @@ from charms.layer import nginx
|
||||
from subprocess import Popen
|
||||
from subprocess import PIPE
|
||||
from subprocess import STDOUT
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
|
||||
@when('certificates.available')
|
||||
@@ -49,6 +50,16 @@ def request_server_certificates(tls):
|
||||
tls.request_server_cert(common_name, sans, certificate_name)
|
||||
|
||||
|
||||
@when('config.changed.port')
|
||||
def close_old_port():
|
||||
config = hookenv.config()
|
||||
old_port = config.previous('port')
|
||||
try:
|
||||
hookenv.close_port(old_port)
|
||||
except CalledProcessError:
|
||||
hookenv.log('Port %d already closed, skipping.' % old_port)
|
||||
|
||||
|
||||
@when('nginx.available', 'apiserver.available',
|
||||
'certificates.server.cert.available')
|
||||
def install_load_balancer(apiserver, tls):
|
||||
@@ -63,20 +74,23 @@ def install_load_balancer(apiserver, tls):
|
||||
if cert_exists and key_exists:
|
||||
# At this point the cert and key exist, and they are owned by root.
|
||||
chown = ['chown', 'www-data:www-data', server_cert_path]
|
||||
|
||||
# Change the owner to www-data so the nginx process can read the cert.
|
||||
subprocess.call(chown)
|
||||
chown = ['chown', 'www-data:www-data', server_key_path]
|
||||
|
||||
# Change the owner to www-data so the nginx process can read the key.
|
||||
subprocess.call(chown)
|
||||
|
||||
hookenv.open_port(hookenv.config('port'))
|
||||
port = hookenv.config('port')
|
||||
hookenv.open_port(port)
|
||||
services = apiserver.services()
|
||||
nginx.configure_site(
|
||||
'apilb',
|
||||
'apilb.conf',
|
||||
server_name='_',
|
||||
services=services,
|
||||
port=hookenv.config('port'),
|
||||
port=port,
|
||||
server_certificate=server_cert_path,
|
||||
server_key=server_key_path,
|
||||
)
|
||||
|
@@ -8,7 +8,7 @@ upstream target_service {
|
||||
|
||||
|
||||
server {
|
||||
listen 443 ssl http2;
|
||||
listen {{ port }} ssl http2;
|
||||
server_name {{ server_name }};
|
||||
|
||||
access_log /var/log/nginx.access.log;
|
||||
@@ -33,9 +33,6 @@ server {
|
||||
proxy_set_header Connection $http_connection;
|
||||
proxy_set_header X-Stream-Protocol-Version $http_x_stream_protocol_version;
|
||||
|
||||
proxy_ssl_certificate {{ server_certificate }};
|
||||
proxy_ssl_certificate_key {{ server_key }};
|
||||
|
||||
add_header X-Stream-Protocol-Version $upstream_http_x_stream_protocol_version;
|
||||
|
||||
proxy_pass https://target_service;
|
||||
|
@@ -21,7 +21,8 @@ and then relate the `kubernetes-e2e` charm.
|
||||
```shell
|
||||
juju deploy kubernetes-core
|
||||
juju deploy cs:~containers/kubernetes-e2e
|
||||
juju add-relation kubernetes-e2e kubernetes-master
|
||||
juju add-relation kubernetes-e2e:kube-control kubernetes-master:kube-control
|
||||
juju add-relation kubernetes-e2e:kubernetes-master kubernetes-master:kube-api-endpoint
|
||||
juju add-relation kubernetes-e2e easyrsa
|
||||
```
|
||||
|
||||
|
@@ -4,6 +4,7 @@ includes:
|
||||
- layer:tls-client
|
||||
- layer:snap
|
||||
- interface:http
|
||||
- interface:kube-control
|
||||
options:
|
||||
tls-client:
|
||||
ca_certificate_path: '/srv/kubernetes/ca.crt'
|
||||
|
@@ -14,6 +14,8 @@ series:
|
||||
requires:
|
||||
kubernetes-master:
|
||||
interface: http
|
||||
kube-control:
|
||||
interface: kube-control
|
||||
resources:
|
||||
kubectl:
|
||||
type: file
|
||||
@@ -23,3 +25,4 @@ resources:
|
||||
type: file
|
||||
filename: kubernetes-test.snap
|
||||
description: kubernetes-test snap
|
||||
|
||||
|
@@ -38,15 +38,22 @@ def reset_delivery_states():
|
||||
|
||||
|
||||
@when('kubernetes-e2e.installed')
|
||||
def report_status():
|
||||
''' Report the status of the charm. '''
|
||||
messaging()
|
||||
|
||||
|
||||
def messaging():
|
||||
''' Probe our relations to determine the proper messaging to the
|
||||
end user '''
|
||||
|
||||
missing_services = []
|
||||
if not is_state('kubernetes-master.available'):
|
||||
missing_services.append('kubernetes-master')
|
||||
missing_services.append('kubernetes-master:http')
|
||||
if not is_state('certificates.available'):
|
||||
missing_services.append('certificates')
|
||||
if not is_state('kubeconfig.ready'):
|
||||
missing_services.append('kubernetes-master:kube-control')
|
||||
|
||||
if missing_services:
|
||||
if len(missing_services) > 1:
|
||||
@@ -80,16 +87,15 @@ def install_snaps():
|
||||
|
||||
@when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
|
||||
'tls_client.client.key.saved', 'kubernetes-master.available',
|
||||
'kubernetes-e2e.installed')
|
||||
'kubernetes-e2e.installed', 'kube-control.auth.available')
|
||||
@when_not('kubeconfig.ready')
|
||||
def prepare_kubeconfig_certificates(master):
|
||||
def prepare_kubeconfig_certificates(master, kube_control):
|
||||
''' Prepare the data to feed to create the kubeconfig file. '''
|
||||
|
||||
layer_options = layer.options('tls-client')
|
||||
# Get all the paths to the tls information required for kubeconfig.
|
||||
ca = layer_options.get('ca_certificate_path')
|
||||
key = layer_options.get('client_key_path')
|
||||
cert = layer_options.get('client_certificate_path')
|
||||
creds = kube_control.get_auth_credentials()
|
||||
|
||||
servers = get_kube_api_servers(master)
|
||||
|
||||
@@ -97,17 +103,28 @@ def prepare_kubeconfig_certificates(master):
|
||||
kubeconfig_path = '/home/ubuntu/.kube/config'
|
||||
|
||||
# Create kubernetes configuration in the default location for ubuntu.
|
||||
create_kubeconfig('/root/.kube/config', servers[0], ca, key, cert,
|
||||
user='root')
|
||||
create_kubeconfig(kubeconfig_path, servers[0], ca, key, cert,
|
||||
user='ubuntu')
|
||||
create_kubeconfig('/root/.kube/config', servers[0], ca,
|
||||
token=creds['client_token'], user='root')
|
||||
create_kubeconfig(kubeconfig_path, servers[0], ca,
|
||||
token=creds['client_token'], user='ubuntu')
|
||||
# Set permissions on the ubuntu users kubeconfig to ensure a consistent UX
|
||||
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
|
||||
check_call(cmd)
|
||||
|
||||
messaging()
|
||||
set_state('kubeconfig.ready')
|
||||
|
||||
|
||||
@when('kube-control.connected')
|
||||
def request_credentials(kube_control):
|
||||
""" Request authorization creds."""
|
||||
|
||||
# The kube-control interface is created to support RBAC.
|
||||
# At this point we might as well do the right thing and return the hostname
|
||||
# even if it will only be used when we enable RBAC
|
||||
user = 'system:masters'
|
||||
kube_control.set_auth_request(user)
|
||||
|
||||
|
||||
@when('kubernetes-e2e.installed', 'kubeconfig.ready')
|
||||
def set_app_version():
|
||||
''' Declare the application version to juju '''
|
||||
@@ -124,19 +141,40 @@ def set_app_version():
|
||||
hookenv.application_version_set(version_from.rstrip())
|
||||
|
||||
|
||||
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
|
||||
context='juju-context', cluster='juju-cluster'):
|
||||
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
|
||||
user='ubuntu', context='juju-context',
|
||||
cluster='juju-cluster', password=None, token=None):
|
||||
'''Create a configuration for Kubernetes based on path using the supplied
|
||||
arguments for values of the Kubernetes server, CA, key, certificate, user,
|
||||
context and cluster.'''
|
||||
if not key and not certificate and not password and not token:
|
||||
raise ValueError('Missing authentication mechanism.')
|
||||
|
||||
# token and password are mutually exclusive. Error early if both are
|
||||
# present. The developer has requested an impossible situation.
|
||||
# see: kubectl config set-credentials --help
|
||||
if token and password:
|
||||
raise ValueError('Token and Password are mutually exclusive.')
|
||||
# Create the config file with the address of the master server.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
|
||||
'--server={2} --certificate-authority={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
|
||||
# Delete old users
|
||||
cmd = 'kubectl config --kubeconfig={0} unset users'
|
||||
check_call(split(cmd.format(kubeconfig)))
|
||||
# Create the credentials using the client flags.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
|
||||
'--client-key={2} --client-certificate={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, user, key, certificate)))
|
||||
cmd = 'kubectl config --kubeconfig={0} ' \
|
||||
'set-credentials {1} '.format(kubeconfig, user)
|
||||
|
||||
if key and certificate:
|
||||
cmd = '{0} --client-key={1} --client-certificate={2} '\
|
||||
'--embed-certs=true'.format(cmd, key, certificate)
|
||||
if password:
|
||||
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
|
||||
# This is mutually exclusive from password. They will not work together.
|
||||
if token:
|
||||
cmd = "{0} --token={1}".format(cmd, token)
|
||||
check_call(split(cmd))
|
||||
# Create a default context with the cluster.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
|
||||
'--cluster={2} --user={3}'
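A minimal usage sketch of the updated signature with token-based credentials; the server address and token are placeholders, while the CA path matches the tls-client layer option above:

```python
# Build a token-only kubeconfig for the ubuntu user (no client key/cert).
create_kubeconfig('/home/ubuntu/.kube/config',
                  'https://10.10.10.1:6443',      # placeholder API server address
                  '/srv/kubernetes/ca.crt',       # ca_certificate_path from layer.yaml
                  token='abcdef0123456789',       # placeholder client token
                  user='ubuntu')

# Passing both token= and password= raises ValueError, mirroring the fact
# that kubectl set-credentials treats them as mutually exclusive.
```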
|
||||
|
@@ -1,5 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
from yaml import safe_load as load
|
||||
from charmhelpers.core.hookenv import (
|
||||
action_get,
|
||||
@@ -11,6 +11,9 @@ from charms.templating.jinja2 import render
|
||||
from subprocess import check_output
|
||||
|
||||
|
||||
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
|
||||
|
||||
|
||||
def kubectl(args):
|
||||
cmd = ['kubectl'] + args
|
||||
return check_output(cmd)
|
||||
|
@@ -26,3 +26,8 @@ options:
|
||||
default: "stable"
|
||||
description: |
|
||||
Snap channel to install Kubernetes master services from
|
||||
client_password:
|
||||
type: string
|
||||
default: ""
|
||||
description: |
|
||||
Password to be used for admin user (leave empty for random password).
|
||||
|
@@ -37,7 +37,7 @@ from charms.reactive import remove_state
|
||||
from charms.reactive import set_state
|
||||
from charms.reactive import is_state
|
||||
from charms.reactive import when, when_any, when_not
|
||||
from charms.reactive.helpers import data_changed
|
||||
from charms.reactive.helpers import data_changed, any_file_changed
|
||||
from charms.kubernetes.common import get_version
|
||||
from charms.kubernetes.common import retry
|
||||
from charms.kubernetes.flagmanager import FlagManager
|
||||
@@ -45,6 +45,7 @@ from charms.kubernetes.flagmanager import FlagManager
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core import unitdata
|
||||
from charmhelpers.core.host import service_stop
|
||||
from charmhelpers.core.templating import render
|
||||
from charmhelpers.fetch import apt_install
|
||||
from charmhelpers.contrib.charmsupport import nrpe
|
||||
@@ -77,8 +78,8 @@ def reset_states_for_delivery():
|
||||
'''An upgrade charm event was triggered by Juju, react to that here.'''
|
||||
migrate_from_pre_snaps()
|
||||
install_snaps()
|
||||
set_state('reconfigure.authentication.setup')
|
||||
remove_state('authentication.setup')
|
||||
remove_state('kubernetes-master.components.started')
|
||||
|
||||
|
||||
def rename_file_idempotent(source, destination):
|
||||
@@ -155,6 +156,7 @@ def install_snaps():
|
||||
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
|
||||
snap.install('cdk-addons', channel=channel)
|
||||
set_state('kubernetes-master.snaps.installed')
|
||||
remove_state('kubernetes-master.components.started')
|
||||
|
||||
|
||||
@when('config.changed.channel')
|
||||
@@ -162,6 +164,22 @@ def channel_changed():
|
||||
install_snaps()
|
||||
|
||||
|
||||
@when('config.changed.client_password', 'leadership.is_leader')
|
||||
def password_changed():
|
||||
"""Handle password change via the charms config."""
|
||||
password = hookenv.config('client_password')
|
||||
if password == "" and is_state('client.password.initialised'):
|
||||
# password_changed is called during an upgrade. Nothing to do.
|
||||
return
|
||||
elif password == "":
|
||||
# Password not initialised
|
||||
password = token_generator()
|
||||
setup_basic_auth(password, "admin", "admin")
|
||||
set_state('reconfigure.authentication.setup')
|
||||
remove_state('authentication.setup')
|
||||
set_state('client.password.initialised')
|
||||
|
||||
|
||||
@when('cni.connected')
|
||||
@when_not('cni.configured')
|
||||
def configure_cni(cni):
|
||||
@@ -187,19 +205,23 @@ def setup_leader_authentication():
|
||||
|
||||
keys = [service_key, basic_auth, known_tokens]
|
||||
# Try first to fetch data from an old leadership broadcast.
|
||||
if not get_keys_from_leader(keys):
|
||||
if not os.path.isfile(basic_auth):
|
||||
setup_basic_auth('admin', 'admin', 'admin')
|
||||
if not get_keys_from_leader(keys) \
|
||||
or is_state('reconfigure.authentication.setup'):
|
||||
last_pass = get_password('basic_auth.csv', 'admin')
|
||||
setup_basic_auth(last_pass, 'admin', 'admin')
|
||||
|
||||
if not os.path.isfile(known_tokens):
|
||||
setup_tokens(None, 'admin', 'admin')
|
||||
setup_tokens(None, 'kubelet', 'kubelet')
|
||||
setup_tokens(None, 'kube_proxy', 'kube_proxy')
|
||||
|
||||
# Generate the default service account token key
|
||||
os.makedirs('/root/cdk', exist_ok=True)
|
||||
if not os.path.isfile(service_key):
|
||||
cmd = ['openssl', 'genrsa', '-out', service_key,
|
||||
'2048']
|
||||
check_call(cmd)
|
||||
remove_state('reconfigure.authentication.setup')
|
||||
|
||||
api_opts.add('service-account-key-file', service_key)
|
||||
controller_opts.add('service-account-private-key-file', service_key)
|
||||
@@ -215,36 +237,42 @@ def setup_leader_authentication():
|
||||
# eg:
|
||||
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
|
||||
charms.leadership.leader_set(leader_data)
|
||||
|
||||
remove_state('kubernetes-master.components.started')
|
||||
set_state('authentication.setup')
|
||||
|
||||
|
||||
@when_not('leadership.is_leader')
|
||||
@when_not('authentication.setup')
|
||||
def setup_non_leader_authentication():
|
||||
api_opts = FlagManager('kube-apiserver')
|
||||
controller_opts = FlagManager('kube-controller-manager')
|
||||
|
||||
service_key = '/root/cdk/serviceaccount.key'
|
||||
basic_auth = '/root/cdk/basic_auth.csv'
|
||||
known_tokens = '/root/cdk/known_tokens.csv'
|
||||
|
||||
hookenv.status_set('maintenance', 'Rendering authentication templates.')
|
||||
|
||||
keys = [service_key, basic_auth, known_tokens]
|
||||
if not get_keys_from_leader(keys):
|
||||
# The source of truth for non-leaders is the leader.
|
||||
# Therefore we overwrite_local with whatever the leader has.
|
||||
if not get_keys_from_leader(keys, overwrite_local=True):
|
||||
# the keys were not retrieved. Non-leaders have to retry.
|
||||
return
|
||||
|
||||
if not any_file_changed(keys) and is_state('authentication.setup'):
|
||||
# No change detected and we have already setup the authentication
|
||||
return
|
||||
|
||||
hookenv.status_set('maintenance', 'Rendering authentication templates.')
|
||||
api_opts = FlagManager('kube-apiserver')
|
||||
api_opts.add('basic-auth-file', basic_auth)
|
||||
api_opts.add('token-auth-file', known_tokens)
|
||||
api_opts.add('service-account-key-file', service_key)
|
||||
|
||||
controller_opts = FlagManager('kube-controller-manager')
|
||||
controller_opts.add('service-account-private-key-file', service_key)
|
||||
|
||||
remove_state('kubernetes-master.components.started')
|
||||
set_state('authentication.setup')
|
||||
|
||||
|
||||
def get_keys_from_leader(keys):
|
||||
def get_keys_from_leader(keys, overwrite_local=False):
|
||||
"""
|
||||
Gets the broadcasted keys from the leader and stores them in
|
||||
the corresponding files.
|
||||
@@ -261,7 +289,7 @@ def get_keys_from_leader(keys):
|
||||
|
||||
for k in keys:
|
||||
# If the path does not exist, assume we need it
|
||||
if not os.path.exists(k):
|
||||
if not os.path.exists(k) or overwrite_local:
|
||||
# Fetch data from leadership broadcast
|
||||
contents = charms.leadership.leader_get(k)
|
||||
# Default to logging the warning and wait for leader data to be set
|
||||
@@ -351,6 +379,22 @@ def send_cluster_dns_detail(kube_control):
|
||||
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
|
||||
|
||||
|
||||
@when('kube-control.auth.requested')
|
||||
@when('authentication.setup')
|
||||
@when('leadership.is_leader')
|
||||
def send_tokens(kube_control):
|
||||
"""Send the tokens to the workers."""
|
||||
kubelet_token = get_token('kubelet')
|
||||
proxy_token = get_token('kube_proxy')
|
||||
admin_token = get_token('admin')
|
||||
|
||||
# Send the data
|
||||
requests = kube_control.auth_user()
|
||||
for request in requests:
|
||||
kube_control.sign_auth_request(request[0], kubelet_token,
|
||||
proxy_token, admin_token)
|
||||
|
||||
|
||||
@when_not('kube-control.connected')
|
||||
def missing_kube_control():
|
||||
"""Inform the operator they need to add the kube-control relation.
|
||||
@@ -448,7 +492,7 @@ def addons_ready():
|
||||
|
||||
|
||||
@when('loadbalancer.available', 'certificates.ca.available',
|
||||
'certificates.client.cert.available')
|
||||
'certificates.client.cert.available', 'authentication.setup')
|
||||
def loadbalancer_kubeconfig(loadbalancer, ca, client):
|
||||
# Get the potential list of loadbalancers from the relation object.
|
||||
hosts = loadbalancer.get_addresses_ports()
|
||||
@@ -460,7 +504,8 @@ def loadbalancer_kubeconfig(loadbalancer, ca, client):
|
||||
build_kubeconfig(server)
|
||||
|
||||
|
||||
@when('certificates.ca.available', 'certificates.client.cert.available')
|
||||
@when('certificates.ca.available', 'certificates.client.cert.available',
|
||||
'authentication.setup')
|
||||
@when_not('loadbalancer.available')
|
||||
def create_self_config(ca, client):
|
||||
'''Create a kubernetes configuration for the master unit.'''
|
||||
@@ -651,6 +696,16 @@ def disable_gpu_mode():
|
||||
remove_state('kubernetes-master.gpu.enabled')
|
||||
|
||||
|
||||
@hook('stop')
|
||||
def shutdown():
|
||||
""" Stop the kubernetes master services
|
||||
|
||||
"""
|
||||
service_stop('snap.kube-apiserver.daemon')
|
||||
service_stop('snap.kube-controller-manager.daemon')
|
||||
service_stop('snap.kube-scheduler.daemon')
|
||||
|
||||
|
||||
def arch():
|
||||
'''Return the package architecture as a string. Raise an exception if the
|
||||
architecture is not supported by kubernetes.'''
|
||||
@@ -669,37 +724,54 @@ def build_kubeconfig(server):
|
||||
# Get all the paths to the tls information required for kubeconfig.
|
||||
ca = layer_options.get('ca_certificate_path')
|
||||
ca_exists = ca and os.path.isfile(ca)
|
||||
key = layer_options.get('client_key_path')
|
||||
key_exists = key and os.path.isfile(key)
|
||||
cert = layer_options.get('client_certificate_path')
|
||||
cert_exists = cert and os.path.isfile(cert)
|
||||
client_pass = get_password('basic_auth.csv', 'admin')
|
||||
# Do we have everything we need?
|
||||
if ca_exists and key_exists and cert_exists:
|
||||
# Cache last server string to know if we need to regenerate the config.
|
||||
if not data_changed('kubeconfig.server', server):
|
||||
return
|
||||
if ca_exists and client_pass:
|
||||
# Create an absolute path for the kubeconfig file.
|
||||
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
|
||||
# Create the kubeconfig on this system so users can access the cluster.
|
||||
create_kubeconfig(kubeconfig_path, server, ca, key, cert)
|
||||
|
||||
create_kubeconfig(kubeconfig_path, server, ca,
|
||||
user='admin', password=client_pass)
|
||||
# Make the config file readable by the ubuntu users so juju scp works.
|
||||
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
|
||||
context='juju-context', cluster='juju-cluster'):
|
||||
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
|
||||
user='ubuntu', context='juju-context',
|
||||
cluster='juju-cluster', password=None, token=None):
|
||||
'''Create a configuration for Kubernetes based on path using the supplied
|
||||
arguments for values of the Kubernetes server, CA, key, certificate, user,
|
||||
context and cluster.'''
|
||||
if not key and not certificate and not password and not token:
|
||||
raise ValueError('Missing authentication mechanism.')
|
||||
|
||||
# token and password are mutually exclusive. Error early if both are
|
||||
# present. The developer has requested an impossible situation.
|
||||
# see: kubectl config set-credentials --help
|
||||
if token and password:
|
||||
raise ValueError('Token and Password are mutually exclusive.')
|
||||
# Create the config file with the address of the master server.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
|
||||
'--server={2} --certificate-authority={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
|
||||
# Delete old users
|
||||
cmd = 'kubectl config --kubeconfig={0} unset users'
|
||||
check_call(split(cmd.format(kubeconfig)))
|
||||
# Create the credentials using the client flags.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
|
||||
'--client-key={2} --client-certificate={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, user, key, certificate)))
|
||||
cmd = 'kubectl config --kubeconfig={0} ' \
|
||||
'set-credentials {1} '.format(kubeconfig, user)
|
||||
|
||||
if key and certificate:
|
||||
cmd = '{0} --client-key={1} --client-certificate={2} '\
|
||||
'--embed-certs=true'.format(cmd, key, certificate)
|
||||
if password:
|
||||
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
|
||||
# This is mutually exclusive from password. They will not work together.
|
||||
if token:
|
||||
cmd = "{0} --token={1}".format(cmd, token)
|
||||
check_call(split(cmd))
|
||||
# Create a default context with the cluster.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
|
||||
'--cluster={2} --user={3}'
|
||||
@@ -786,7 +858,6 @@ def configure_master_services():
|
||||
api_opts.add('service-cluster-ip-range', service_cidr())
|
||||
api_opts.add('min-request-timeout', '300')
|
||||
api_opts.add('v', '4')
|
||||
api_opts.add('client-ca-file', ca_cert_path)
|
||||
api_opts.add('tls-cert-file', server_cert_path)
|
||||
api_opts.add('tls-private-key-file', server_key_path)
|
||||
api_opts.add('kubelet-certificate-authority', ca_cert_path)
|
||||
@@ -826,6 +897,7 @@ def configure_master_services():
|
||||
|
||||
cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')
|
||||
check_call(cmd)
|
||||
|
||||
cmd = (
|
||||
['snap', 'set', 'kube-controller-manager'] +
|
||||
controller_opts.to_s().split(' ')
|
||||
@@ -835,14 +907,16 @@ def configure_master_services():
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def setup_basic_auth(username='admin', password='admin', user='admin'):
|
||||
def setup_basic_auth(password=None, username='admin', uid='admin'):
|
||||
'''Create the htaccess file and the tokens.'''
|
||||
root_cdk = '/root/cdk'
|
||||
if not os.path.isdir(root_cdk):
|
||||
os.makedirs(root_cdk)
|
||||
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
|
||||
if not password:
|
||||
password = token_generator()
|
||||
with open(htaccess, 'w') as stream:
|
||||
stream.write('{0},{1},{2}'.format(username, password, user))
|
||||
stream.write('{0},{1},{2}'.format(password, username, uid))
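For context, kube-apiserver's --basic-auth-file expects rows ordered password,user,uid, which is what the reordered write above now produces; a small sketch with placeholder values:

```python
# Example row written to /root/cdk/basic_auth.csv (placeholder values).
password, username, uid = 'S3cretPass', 'admin', 'admin'
row = '{0},{1},{2}'.format(password, username, uid)
# -> "S3cretPass,admin,admin"
```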
|
||||
|
||||
|
||||
def setup_tokens(token, username, user):
|
||||
@@ -852,12 +926,49 @@ def setup_tokens(token, username, user):
|
||||
os.makedirs(root_cdk)
|
||||
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
|
||||
if not token:
|
||||
alpha = string.ascii_letters + string.digits
|
||||
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32))
|
||||
token = token_generator()
|
||||
with open(known_tokens, 'a') as stream:
|
||||
stream.write('{0},{1},{2}\n'.format(token, username, user))
|
||||
|
||||
|
||||
def get_password(csv_fname, user):
|
||||
'''Get the password of user within the csv file provided.'''
|
||||
root_cdk = '/root/cdk'
|
||||
tokens_fname = os.path.join(root_cdk, csv_fname)
|
||||
if not os.path.isfile(tokens_fname):
|
||||
return None
|
||||
with open(tokens_fname, 'r') as stream:
|
||||
for line in stream:
|
||||
record = line.split(',')
|
||||
if record[1] == user:
|
||||
return record[0]
|
||||
return None
|
||||
|
||||
|
||||
def get_token(username):
|
||||
"""Grab a token from the static file if present. """
|
||||
return get_password('known_tokens.csv', username)
|
||||
|
||||
|
||||
def set_token(password, save_salt):
|
||||
''' Store a token so it can be recalled later by token_generator.
|
||||
|
||||
param: password - the password to be stored
|
||||
param: save_salt - the key to store the value of the token.'''
|
||||
db = unitdata.kv()
|
||||
db.set(save_salt, password)
|
||||
return db.get(save_salt)
|
||||
|
||||
|
||||
def token_generator(length=32):
|
||||
''' Generate a random token for use in passwords and account tokens.
|
||||
|
||||
param: length - the length of the token to generate'''
|
||||
alpha = string.ascii_letters + string.digits
|
||||
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
|
||||
return token
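A short usage sketch tying the helpers above together, assuming it runs on the master unit where /root/cdk is writable:

```python
# Generate a random password, record it as the admin basic-auth entry,
# then read it back to confirm the round trip.
password = token_generator()                    # 32 random alphanumeric characters
setup_basic_auth(password, 'admin', 'admin')    # writes /root/cdk/basic_auth.csv
assert get_password('basic_auth.csv', 'admin') == password

# Service tokens land in known_tokens.csv and are fetched via get_token().
setup_tokens(None, 'kubelet', 'kubelet')        # None -> a fresh random token
kubelet_token = get_token('kubelet')
```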
|
||||
|
||||
|
||||
@retry(times=3, delay_secs=10)
|
||||
def all_kube_system_pods_running():
|
||||
''' Check pod status in the kube-system namespace. Returns True if all
|
||||
@@ -871,7 +982,6 @@ def all_kube_system_pods_running():
|
||||
return False
|
||||
|
||||
result = json.loads(output)
|
||||
|
||||
for pod in result['items']:
|
||||
status = pod['status']['phase']
|
||||
if status != 'Running':
|
||||
|
@@ -22,6 +22,7 @@ options:
|
||||
- 'ceph-common'
|
||||
- 'nfs-common'
|
||||
- 'socat'
|
||||
- 'virt-what'
|
||||
tls-client:
|
||||
ca_certificate_path: '/root/cdk/ca.crt'
|
||||
server_certificate_path: '/root/cdk/server.crt'
|
||||
|
@@ -149,6 +149,7 @@ def install_snaps():
|
||||
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
|
||||
snap.install('kube-proxy', channel=channel, classic=True)
|
||||
set_state('kubernetes-worker.snaps.installed')
|
||||
set_state('kubernetes-worker.restart-needed')
|
||||
remove_state('kubernetes-worker.snaps.upgrade-needed')
|
||||
remove_state('kubernetes-worker.snaps.upgrade-specified')
|
||||
|
||||
@@ -157,15 +158,15 @@ def install_snaps():
|
||||
def shutdown():
|
||||
''' When this unit is destroyed:
|
||||
- delete the current node
|
||||
- stop the kubelet service
|
||||
- stop the kube-proxy service
|
||||
- remove the 'kubernetes-worker.cni-plugins.installed' state
|
||||
- stop the worker services
|
||||
'''
|
||||
if os.path.isfile(kubeconfig_path):
|
||||
kubectl('delete', 'node', gethostname())
|
||||
service_stop('kubelet')
|
||||
service_stop('kube-proxy')
|
||||
remove_state('kubernetes-worker.cni-plugins.installed')
|
||||
try:
|
||||
if os.path.isfile(kubeconfig_path):
|
||||
kubectl('delete', 'node', gethostname())
|
||||
except CalledProcessError:
|
||||
hookenv.log('Failed to unregister node.')
|
||||
service_stop('snap.kubelet.daemon')
|
||||
service_stop('snap.kube-proxy.daemon')
|
||||
|
||||
|
||||
@when('docker.available')
|
||||
@@ -303,9 +304,10 @@ def watch_for_changes(kube_api, kube_control, cni):
|
||||
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
|
||||
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
|
||||
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
|
||||
'tls_client.server.key.saved', 'kube-control.dns.available',
|
||||
'tls_client.server.key.saved',
|
||||
'kube-control.dns.available', 'kube-control.auth.available',
|
||||
'cni.available', 'kubernetes-worker.restart-needed')
|
||||
def start_worker(kube_api, kube_control, cni):
|
||||
def start_worker(kube_api, kube_control, auth_control, cni):
|
||||
''' Start kubelet using the provided API and DNS info.'''
|
||||
servers = get_kube_api_servers(kube_api)
|
||||
# Note that the DNS server doesn't necessarily exist at this point. We know
|
||||
@@ -320,10 +322,13 @@ def start_worker(kube_api, kube_control, cni):
|
||||
hookenv.log('Waiting for cluster cidr.')
|
||||
return
|
||||
|
||||
creds = kube_control.get_auth_credentials()
|
||||
data_changed('kube-control.creds', creds)
|
||||
|
||||
# set --allow-privileged flag for kubelet
|
||||
set_privileged()
|
||||
|
||||
create_config(random.choice(servers))
|
||||
create_config(random.choice(servers), creds)
|
||||
configure_worker_services(servers, dns, cluster_cidr)
|
||||
set_state('kubernetes-worker.config.created')
|
||||
restart_unit_services()
|
||||
@@ -429,27 +434,25 @@ def arch():
|
||||
return architecture
|
||||
|
||||
|
||||
def create_config(server):
|
||||
def create_config(server, creds):
|
||||
'''Create a kubernetes configuration for the worker unit.'''
|
||||
# Get the options from the tls-client layer.
|
||||
layer_options = layer.options('tls-client')
|
||||
# Get all the paths to the tls information required for kubeconfig.
|
||||
ca = layer_options.get('ca_certificate_path')
|
||||
key = layer_options.get('client_key_path')
|
||||
cert = layer_options.get('client_certificate_path')
|
||||
|
||||
# Create kubernetes configuration in the default location for ubuntu.
|
||||
create_kubeconfig('/home/ubuntu/.kube/config', server, ca, key, cert,
|
||||
user='ubuntu')
|
||||
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
|
||||
token=creds['client_token'], user='ubuntu')
|
||||
# Make the config dir readable by the ubuntu users so juju scp works.
|
||||
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
|
||||
check_call(cmd)
|
||||
# Create kubernetes configuration in the default location for root.
|
||||
create_kubeconfig('/root/.kube/config', server, ca, key, cert,
|
||||
user='root')
|
||||
create_kubeconfig('/root/.kube/config', server, ca,
|
||||
token=creds['client_token'], user='root')
|
||||
# Create kubernetes configuration for kubelet, and kube-proxy services.
|
||||
create_kubeconfig(kubeconfig_path, server, ca, key, cert,
|
||||
user='kubelet')
|
||||
create_kubeconfig(kubeconfig_path, server, ca,
|
||||
token=creds['kubelet_token'], user='kubelet')
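For orientation, the credentials dictionary consumed by create_config() is used with at least the following keys (a sketch inferred from the calls above; the kube-control interface may carry more fields):

```python
# Shape of the creds dict as read by create_config(); values are placeholders.
creds = {
    'client_token': '<token used for the ubuntu and root kubeconfigs>',
    'kubelet_token': '<token used for the kubelet kubeconfig>',
}
```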
|
||||
|
||||
|
||||
def configure_worker_services(api_servers, dns, cluster_cidr):
|
||||
@@ -464,7 +467,6 @@ def configure_worker_services(api_servers, dns, cluster_cidr):
|
||||
kubelet_opts.add('require-kubeconfig', 'true')
|
||||
kubelet_opts.add('kubeconfig', kubeconfig_path)
|
||||
kubelet_opts.add('network-plugin', 'cni')
|
||||
kubelet_opts.add('logtostderr', 'true')
|
||||
kubelet_opts.add('v', '0')
|
||||
kubelet_opts.add('address', '0.0.0.0')
|
||||
kubelet_opts.add('port', '10250')
|
||||
@@ -474,6 +476,7 @@ def configure_worker_services(api_servers, dns, cluster_cidr):
|
||||
kubelet_opts.add('client-ca-file', ca_cert_path)
|
||||
kubelet_opts.add('tls-cert-file', server_cert_path)
|
||||
kubelet_opts.add('tls-private-key-file', server_key_path)
|
||||
kubelet_opts.add('logtostderr', 'true')
|
||||
|
||||
kube_proxy_opts = FlagManager('kube-proxy')
|
||||
kube_proxy_opts.add('cluster-cidr', cluster_cidr)
|
||||
@@ -482,25 +485,49 @@ def configure_worker_services(api_servers, dns, cluster_cidr):
|
||||
kube_proxy_opts.add('v', '0')
|
||||
kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
|
||||
|
||||
if b'lxc' in check_output('virt-what', shell=True):
|
||||
kube_proxy_opts.add('conntrack-max-per-core', '0')
|
||||
|
||||
cmd = ['snap', 'set', 'kubelet'] + kubelet_opts.to_s().split(' ')
|
||||
check_call(cmd)
|
||||
cmd = ['snap', 'set', 'kube-proxy'] + kube_proxy_opts.to_s().split(' ')
|
||||
check_call(cmd)
|
||||
|
||||
|
||||
def create_kubeconfig(kubeconfig, server, ca, key, certificate, user='ubuntu',
|
||||
context='juju-context', cluster='juju-cluster'):
|
||||
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
|
||||
user='ubuntu', context='juju-context',
|
||||
cluster='juju-cluster', password=None, token=None):
|
||||
'''Create a configuration for Kubernetes based on path using the supplied
|
||||
arguments for values of the Kubernetes server, CA, key, certificate, user,
|
||||
context and cluster.'''
|
||||
if not key and not certificate and not password and not token:
|
||||
raise ValueError('Missing authentication mechanism.')
|
||||
|
||||
# token and password are mutually exclusive. Error early if both are
|
||||
# present. The developer has requested an impossible situation.
|
||||
# see: kubectl config set-credentials --help
|
||||
if token and password:
|
||||
raise ValueError('Token and Password are mutually exclusive.')
|
||||
# Create the config file with the address of the master server.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
|
||||
'--server={2} --certificate-authority={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
|
||||
# Delete old users
|
||||
cmd = 'kubectl config --kubeconfig={0} unset users'
|
||||
check_call(split(cmd.format(kubeconfig)))
|
||||
# Create the credentials using the client flags.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-credentials {1} ' \
|
||||
'--client-key={2} --client-certificate={3} --embed-certs=true'
|
||||
check_call(split(cmd.format(kubeconfig, user, key, certificate)))
|
||||
cmd = 'kubectl config --kubeconfig={0} ' \
|
||||
'set-credentials {1} '.format(kubeconfig, user)
|
||||
|
||||
if key and certificate:
|
||||
cmd = '{0} --client-key={1} --client-certificate={2} '\
|
||||
'--embed-certs=true'.format(cmd, key, certificate)
|
||||
if password:
|
||||
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
|
||||
# This is mutually exclusive from password. They will not work together.
|
||||
if token:
|
||||
cmd = "{0} --token={1}".format(cmd, token)
|
||||
check_call(split(cmd))
|
||||
# Create a default context with the cluster.
|
||||
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
|
||||
'--cluster={2} --user={3}'
|
||||
@@ -762,6 +789,26 @@ def notify_master_gpu_not_enabled(kube_control):
|
||||
kube_control.set_gpu(False)
|
||||
|
||||
|
||||
@when('kube-control.connected')
|
||||
def request_kubelet_and_proxy_credentials(kube_control):
|
||||
""" Request kubelet node authorization with a well formed kubelet user.
|
||||
This also implies that we are requesting kube-proxy auth. """
|
||||
|
||||
# The kube-control interface is created to support RBAC.
|
||||
# At this point we might as well do the right thing and return the hostname
|
||||
# even if it will only be used when we enable RBAC
|
||||
nodeuser = 'system:node:{}'.format(gethostname())
|
||||
kube_control.set_auth_request(nodeuser)
|
||||
|
||||
|
||||
@when('kube-control.auth.available')
|
||||
def catch_change_in_creds(kube_control):
|
||||
"""Request a service restart in case credential updates were detected."""
|
||||
creds = kube_control.get_auth_credentials()
|
||||
if data_changed('kube-control.creds', creds):
|
||||
set_state('kubernetes-worker.restart-needed')
|
||||
|
||||
|
||||
@when_not('kube-control.connected')
|
||||
def missing_kube_control():
|
||||
"""Inform the operator they need to add the kube-control relation.
|
||||
|
@@ -35,7 +35,7 @@ REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
|
||||
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
|
||||
|
||||
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci}
|
||||
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-debian}
|
||||
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-gci}
|
||||
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-stable-59-9460-64-0}
|
||||
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
|
||||
|
||||
|
39
cluster/kubemark/pre-existing/config-default.sh
Normal file
@@ -0,0 +1,39 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Configuration for landing a Kubemark cluster on a pre-existing Kubernetes
|
||||
# cluster.
|
||||
|
||||
# Pre-existing provider expects a MASTER_IP.
|
||||
# If you need to specify a port that's not the default (443), add it to MASTER_IP.
|
||||
#
|
||||
# Example: Connect to the Master on the secure port 6443
|
||||
# MASTER_IP=192.168.122.5:6443
|
||||
#
|
||||
MASTER_IP="${MASTER_IP:-}"
|
||||
|
||||
# The container registry and project given to the kubemark container:
|
||||
# $CONTAINER_REGISTRY/$PROJECT/kubemark
|
||||
#
|
||||
CONTAINER_REGISTRY="${CONTAINER_REGISTRY:-}"
|
||||
PROJECT="${PROJECT:-}"
|
||||
|
||||
NUM_NODES="${NUM_NODES:-1}"
|
||||
|
||||
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"
|
||||
KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-}"
|
||||
KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-}"
|
||||
MASTER_NAME="${MASTER_NAME:-}"
|
||||
USE_REAL_PROXIER="${USE_REAL_PROXIER:-true}"
|
@@ -224,8 +224,19 @@ function dump_nodes() {
|
||||
return
|
||||
fi
|
||||
|
||||
nodes_selected_for_logs=()
|
||||
if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
|
||||
# We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
|
||||
for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
|
||||
do
|
||||
nodes_selected_for_logs+=("${node_names[$index]}")
|
||||
done
|
||||
else
|
||||
nodes_selected_for_logs=( "${node_names[@]}" )
|
||||
fi
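The same selection logic, restated as a small Python sketch (the function name is illustrative):

```python
import random

def select_nodes_for_logs(node_names, n_random=None):
    """Pick n_random distinct node names, or keep them all when unset."""
    if n_random:
        return random.sample(list(node_names), min(int(n_random), len(node_names)))
    return list(node_names)
```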
|
||||
|
||||
proc=${max_scp_processes}
|
||||
for node_name in "${node_names[@]}"; do
|
||||
for node_name in "${nodes_selected_for_logs[@]}"; do
|
||||
node_dir="${report_dir}/${node_name}"
|
||||
mkdir -p "${node_dir}"
|
||||
# Save logs in the background. This speeds up things when there are
|
||||
|
363
cluster/log-dump/log-dump.sh
Executable file
@@ -0,0 +1,363 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Call this to dump all master and node logs into the folder specified in $1
|
||||
# (defaults to _artifacts). Only works if the provider supports SSH.
|
||||
|
||||
set -o errexit
|
||||
set -o nounset
|
||||
set -o pipefail
|
||||
|
||||
readonly report_dir="${1:-_artifacts}"
|
||||
|
||||
# In order to more trivially extend log-dump for custom deployments,
|
||||
# check for a function named log_dump_custom_get_instances. If it's
|
||||
# defined, we assume the function can be called with one argument, the
|
||||
# role, which is either "master" or "node".
|
||||
if [[ $(type -t log_dump_custom_get_instances) == "function" ]]; then
|
||||
readonly use_custom_instance_list=yes
|
||||
else
|
||||
readonly use_custom_instance_list=
|
||||
fi
|
||||
|
||||
readonly master_ssh_supported_providers="gce aws kubemark"
|
||||
readonly node_ssh_supported_providers="gce gke aws kubemark"
|
||||
|
||||
readonly master_logfiles="kube-apiserver kube-scheduler rescheduler kube-controller-manager etcd etcd-events glbc cluster-autoscaler kube-addon-manager fluentd"
|
||||
readonly node_logfiles="kube-proxy fluentd node-problem-detector"
|
||||
readonly node_systemd_services="node-problem-detector"
|
||||
readonly hollow_node_logfiles="kubelet-hollow-node-* kubeproxy-hollow-node-* npd-*"
|
||||
readonly aws_logfiles="cloud-init-output"
|
||||
readonly gce_logfiles="startupscript"
|
||||
readonly kern_logfile="kern"
|
||||
readonly initd_logfiles="docker"
|
||||
readonly supervisord_logfiles="kubelet supervisor/supervisord supervisor/kubelet-stdout supervisor/kubelet-stderr supervisor/docker-stdout supervisor/docker-stderr"
|
||||
readonly systemd_services="kubelet docker"
|
||||
|
||||
# Limit the number of concurrent node connections so that we don't run out of
|
||||
# file descriptors for large clusters.
|
||||
readonly max_scp_processes=25
|
||||
|
||||
# This template spits out the external IPs and images for each node in the cluster in a format like so:
|
||||
# 52.32.7.85 gcr.io/google_containers/kube-apiserver:1355c18c32d7bef16125120bce194fad gcr.io/google_containers/kube-controller-manager:46365cdd8d28b8207950c3c21d1f3900 [...]
|
||||
readonly ips_and_images='{range .items[*]}{@.status.addresses[?(@.type == "ExternalIP")].address} {@.status.images[*].names[*]}{"\n"}{end}'
|
||||
|
||||
function setup() {
|
||||
if [[ -z "${use_custom_instance_list}" ]]; then
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
: ${KUBE_CONFIG_FILE:="config-test.sh"}
|
||||
source "${KUBE_ROOT}/cluster/kube-util.sh"
|
||||
detect-project &> /dev/null
|
||||
elif [[ -z "${LOG_DUMP_SSH_KEY:-}" ]]; then
|
||||
echo "LOG_DUMP_SSH_KEY not set, but required when using log_dump_custom_get_instances"
|
||||
exit 1
|
||||
elif [[ -z "${LOG_DUMP_SSH_USER:-}" ]]; then
|
||||
echo "LOG_DUMP_SSH_USER not set, but required when using log_dump_custom_get_instances"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function log-dump-ssh() {
|
||||
if [[ -z "${use_custom_instance_list}" ]]; then
|
||||
ssh-to-node "$@"
|
||||
return
|
||||
fi
|
||||
|
||||
local host="$1"
|
||||
local cmd="$2"
|
||||
|
||||
ssh -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${host}" "${cmd}"
|
||||
}
|
||||
|
||||
# Copy all files /var/log/{$3}.log on node $1 into local dir $2.
|
||||
# $3 should be a space-separated string of files.
|
||||
# This function shouldn't ever trigger errexit, but doesn't block stderr.
|
||||
function copy-logs-from-node() {
|
||||
local -r node="${1}"
|
||||
local -r dir="${2}"
|
||||
local files=( ${3} )
|
||||
# Append ".log*"
|
||||
# The * at the end is needed to also copy rotated logs (which happens
|
||||
# in large clusters and long runs).
|
||||
files=( "${files[@]/%/.log*}" )
|
||||
# Prepend "/var/log/"
|
||||
files=( "${files[@]/#/\/var\/log\/}" )
|
||||
# Comma delimit (even the singleton, or scp does the wrong thing), surround by braces.
|
||||
local -r scp_files="{$(printf "%s," "${files[@]}")}"
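A sketch of the path mangling above for a hypothetical two-entry file list:

```python
# Mirror the bash expansions: append ".log*", prepend "/var/log/", then
# comma-delimit inside braces (printf leaves a trailing comma, kept here).
files = ['kube-proxy', 'fluentd']                 # hypothetical input
files = [f + '.log*' for f in files]
files = ['/var/log/' + f for f in files]
scp_files = '{' + ''.join(f + ',' for f in files) + '}'
# -> "{/var/log/kube-proxy.log*,/var/log/fluentd.log*,}"
```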
|
||||
|
||||
if [[ -n "${use_custom_instance_list}" ]]; then
|
||||
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${LOG_DUMP_SSH_KEY}" "${LOG_DUMP_SSH_USER}@${node}:${scp_files}" "${dir}" > /dev/null || true
|
||||
else
|
||||
case "${KUBERNETES_PROVIDER}" in
|
||||
gce|gke|kubemark)
|
||||
# get-serial-port-output lets you ask for ports 1-4, but currently (11/21/2016) only port 1 contains useful information
|
||||
gcloud compute instances get-serial-port-output --project "${PROJECT}" --zone "${ZONE}" --port 1 "${node}" > "${dir}/serial-1.log" || true
|
||||
gcloud compute scp --recurse --project "${PROJECT}" --zone "${ZONE}" "${node}:${scp_files}" "${dir}" > /dev/null || true
|
||||
;;
|
||||
aws)
|
||||
local ip=$(get_ssh_hostname "${node}")
|
||||
scp -oLogLevel=quiet -oConnectTimeout=30 -oStrictHostKeyChecking=no -i "${AWS_SSH_KEY}" "${SSH_USER}@${ip}:${scp_files}" "${dir}" > /dev/null || true
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
}
|
||||
|
||||
# Save logs for node $1 into directory $2. Pass in any non-common files in $3.
|
||||
# Pass in any non-common systemd services in $4.
|
||||
# $3 and $4 should be a space-separated list of files.
|
||||
# This function shouldn't ever trigger errexit
|
||||
function save-logs() {
  local -r node_name="${1}"
  local -r dir="${2}"
  local files="${3}"
  local opt_systemd_services="${4:-""}"
  if [[ -n "${use_custom_instance_list}" ]]; then
    if [[ -n "${LOG_DUMP_SAVE_LOGS:-}" ]]; then
      files="${files} ${LOG_DUMP_SAVE_LOGS:-}"
    fi
  else
    case "${KUBERNETES_PROVIDER}" in
      gce|gke|kubemark)
        files="${files} ${gce_logfiles}"
        if [[ "${KUBERNETES_PROVIDER}" == "kubemark" && "${ENABLE_HOLLOW_NODE_LOGS:-}" == "true" ]]; then
          files="${files} ${hollow_node_logfiles}"
        fi
        ;;
      aws)
        files="${files} ${aws_logfiles}"
        ;;
    esac
  fi
  local -r services=( ${systemd_services} ${opt_systemd_services} ${LOG_DUMP_SAVE_SERVICES:-} )

  if log-dump-ssh "${node_name}" "command -v journalctl" &> /dev/null; then
    log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-installation.service" > "${dir}/kube-node-installation.log" || true
    log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -u kube-node-configuration.service" > "${dir}/kube-node-configuration.log" || true
    log-dump-ssh "${node_name}" "sudo journalctl --output=short-precise -k" > "${dir}/kern.log" || true

    for svc in "${services[@]}"; do
      log-dump-ssh "${node_name}" "sudo journalctl --output=cat -u ${svc}.service" > "${dir}/${svc}.log" || true
    done
  else
    files="${kern_logfile} ${files} ${initd_logfiles} ${supervisord_logfiles}"
  fi

  echo "Changing logfiles to be world-readable for download"
  log-dump-ssh "${node_name}" "sudo chmod -R a+r /var/log" || true

  echo "Copying '${files}' from ${node_name}"
  copy-logs-from-node "${node_name}" "${dir}" "${files}"
}

function dump_masters() {
  local master_names
  if [[ -n "${use_custom_instance_list}" ]]; then
    master_names=( $(log_dump_custom_get_instances master) )
  elif [[ ! "${master_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
    echo "Master SSH not supported for ${KUBERNETES_PROVIDER}"
    return
  else
    if ! (detect-master &> /dev/null); then
      echo "Master not detected. Is the cluster up?"
      return
    fi
    master_names=( "${MASTER_NAME}" )
  fi

  if [[ "${#master_names[@]}" == 0 ]]; then
    echo "No masters found?"
    return
  fi

  proc=${max_scp_processes}
  for master_name in "${master_names[@]}"; do
    master_dir="${report_dir}/${master_name}"
    mkdir -p "${master_dir}"
    save-logs "${master_name}" "${master_dir}" "${master_logfiles}" &

    # We don't want to run more than ${max_scp_processes} at a time, so
    # wait once we hit that many nodes. This isn't ideal, since one might
    # take much longer than the others, but it should help.
    proc=$((proc - 1))
    if [[ proc -eq 0 ]]; then
      proc=${max_scp_processes}
      wait
    fi
  done
  # Wait for any remaining processes.
  if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then
    wait
  fi
}

function dump_nodes() {
  local node_names
  if [[ -n "${1:-}" ]]; then
    echo "Dumping logs for nodes provided as args to dump_nodes() function"
    node_names=( "$@" )
  elif [[ -n "${use_custom_instance_list}" ]]; then
    echo "Dumping logs for nodes provided by log_dump_custom_get_instances() function"
    node_names=( $(log_dump_custom_get_instances node) )
  elif [[ ! "${node_ssh_supported_providers}" =~ "${KUBERNETES_PROVIDER}" ]]; then
    echo "Node SSH not supported for ${KUBERNETES_PROVIDER}"
    return
  else
    echo "Detecting nodes in the cluster"
    detect-node-names &> /dev/null
    node_names=( "${NODE_NAMES[@]}" )
  fi

  if [[ "${#node_names[@]}" == 0 ]]; then
    echo "No nodes found!"
    return
  fi

  nodes_selected_for_logs=()
  if [[ -n "${LOGDUMP_ONLY_N_RANDOM_NODES:-}" ]]; then
    # We randomly choose 'LOGDUMP_ONLY_N_RANDOM_NODES' many nodes for fetching logs.
    for index in `shuf -i 0-$(( ${#node_names[*]} - 1 )) -n ${LOGDUMP_ONLY_N_RANDOM_NODES}`
    do
      nodes_selected_for_logs+=("${node_names[$index]}")
    done
  else
    nodes_selected_for_logs=( "${node_names[@]}" )
  fi

  proc=${max_scp_processes}
  for node_name in "${nodes_selected_for_logs[@]}"; do
    node_dir="${report_dir}/${node_name}"
    mkdir -p "${node_dir}"
    # Save logs in the background. This speeds up things when there are
    # many nodes.
    save-logs "${node_name}" "${node_dir}" "${node_logfiles}" "${node_systemd_services}" &

    # We don't want to run more than ${max_scp_processes} at a time, so
    # wait once we hit that many nodes. This isn't ideal, since one might
    # take much longer than the others, but it should help.
    proc=$((proc - 1))
    if [[ proc -eq 0 ]]; then
      proc=${max_scp_processes}
      wait
    fi
  done
  # Wait for any remaining processes.
  if [[ proc -gt 0 && proc -lt ${max_scp_processes} ]]; then
    wait
  fi
}

function dump_nodes_with_logexporter() {
  echo "Detecting nodes in the cluster"
  detect-node-names &> /dev/null

  if [[ "${#NODE_NAMES[@]}" == 0 ]]; then
    echo "No nodes found!"
    return
  fi

  # Obtain parameters required by logexporter.
  local -r service_account_credentials="$(cat ${GOOGLE_APPLICATION_CREDENTIALS} | base64)"
  local -r cloud_provider="${KUBERNETES_PROVIDER}"
  local -r gcs_artifacts_dir="${GCS_ARTIFACTS_DIR}"
  local -r enable_hollow_node_logs="${ENABLE_HOLLOW_NODE_LOGS:-false}"
  local -r logexport_sleep_seconds="$(( 30 + NUM_NODES / 10 ))"
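  # For example (illustrative): with NUM_NODES=1000 the script waits
  # 30 + 1000/10 = 130 seconds before checking on the logexporter pods.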

  # Fill in the parameters in the logexporter daemonset template.
  sed -i'' -e "s/{{.ServiceAccountCredentials}}/${service_account_credentials}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  sed -i'' -e "s/{{.CloudProvider}}/${cloud_provider}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  sed -i'' -e "s/{{.GCSPath}}/${gcs_artifacts_dir}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"
  sed -i'' -e "s/{{.EnableHollowNodeLogs}}/${enable_hollow_node_logs}/g" "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"

  # Create the logexporter namespace, service-account secret and the logexporter daemonset within that namespace.
  KUBECTL="${KUBECTL:-${KUBE_ROOT}/cluster/kubectl.sh}"
  "${KUBECTL}" create -f "${KUBE_ROOT}/cluster/log-dump/logexporter-daemonset.yaml"

  # Give some time for the pods to finish uploading logs.
  sleep "${logexport_sleep_seconds}"

  # List the logexporter pods created and their corresponding nodes.
  pods_and_nodes=()
  for retry in {1..5}; do
    pods_and_nodes=$(${KUBECTL} get pods -n logexporter -o=custom-columns=NAME:.metadata.name,NODE:.spec.nodeName | tail -n +2)
    if [[ -n "${pods_and_nodes}" ]]; then
      echo -e "List of logexporter pods found:\n${pods_and_nodes}"
      break
    fi
    if [[ "${retry}" == 5 ]]; then
      echo "Failed to list any logexporter pods after multiple retries. Falling back to logdump for nodes through SSH."
      "${KUBECTL}" delete namespace logexporter
      dump_nodes "${NODE_NAMES[@]}"
      return
    fi
  done

  # Collect names of nodes we didn't find a logexporter pod on.
  # Note: This step is O(#nodes^2) as we check if each node is present in the list of nodes running logexporter.
  # Making it linear would add code complexity without much benefit (as it just takes < 1s for 5k nodes anyway).
  failed_nodes=()
  for node in "${NODE_NAMES[@]}"; do
    if [[ ! "${pods_and_nodes}" =~ "${node}" ]]; then
      failed_nodes+=("${node}")
    fi
  done

  # Collect names of nodes whose logexporter pod didn't succeed.
  # TODO(shyamjvs): Parallelize the for loop below to make it faster (if needed).
  logexporter_pods=( $(echo "${pods_and_nodes}" | awk '{print $1}') )
  logexporter_nodes=( $(echo "${pods_and_nodes}" | awk '{print $2}') )
  for index in "${!logexporter_pods[@]}"; do
    pod="${logexporter_pods[$index]}"
    node="${logexporter_nodes[$index]}"
    # TODO(shyamjvs): Use a /status endpoint on the pod instead of checking its logs if that's faster.
    pod_success_log=$("${KUBECTL}" logs "${pod}" -n logexporter 2>&1 | grep "Logs successfully uploaded") || true
    if [[ -z "${pod_success_log}" ]]; then
      failed_nodes+=("${node}")
    fi
  done

  # Delete the logexporter resources and dump logs for the failed nodes (if any) through SSH.
  "${KUBECTL}" delete namespace logexporter
  if [[ "${#failed_nodes[@]}" != 0 ]]; then
    echo -e "Dumping logs through SSH for nodes logexporter failed to succeed on:\n${failed_nodes[@]}"
    dump_nodes "${failed_nodes[@]}"
  fi
}

function main() {
  setup
  # Copy master logs to artifacts dir locally (through SSH).
  echo "Dumping logs from master locally to '${report_dir}'"
  dump_masters
  if [[ "${DUMP_ONLY_MASTER_LOGS:-}" == "true" ]]; then
    echo "Skipping dumping of node logs"
    return
  fi

  # Copy logs from nodes to GCS directly or to artifacts dir locally (through SSH).
  if [[ "${ENABLE_LOGEXPORTER:-}" == "true" ]]; then
    if [[ -z "${GCS_ARTIFACTS_DIR:-}" ]]; then
      echo "Env var GCS_ARTIFACTS_DIR is empty. Failed to dump node logs to GCS."
      exit 1
    fi
    echo "Dumping logs from nodes to GCS directly at '${GCS_ARTIFACTS_DIR}'"
    dump_nodes_with_logexporter
  else
    echo "Dumping logs from nodes locally to '${report_dir}'"
    dump_nodes
  fi
}

main
74
cluster/log-dump/logexporter-daemonset.yaml
Normal file
@@ -0,0 +1,74 @@
# Template job config for running the log exporter on the cluster as a daemonset.
# Creates everything within 'logexporter' namespace.
#
# Note: Since daemonsets have an "Always" restart policy for their pods, we provide a long
# sleep-duration (24 hr) to the logexporter pods so they don't finish the work and
# get restarted while some pods are still running. So it is your duty to detect
# that the work has been done (or use some timeout) and delete the daemonset yourself.
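#
# Illustrative note (not part of the original file): cluster/log-dump/log-dump.sh
# above does exactly that -- it greps each pod's logs for "Logs successfully
# uploaded" and then runs "kubectl delete namespace logexporter".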

apiVersion: v1
kind: Namespace
metadata:
  name: logexporter
---
apiVersion: v1
kind: Secret
metadata:
  name: google-service-account
  namespace: logexporter
type: Opaque
data:
  service-account.json: {{.ServiceAccountCredentials}}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: logexporter
  namespace: logexporter
spec:
  template:
    metadata:
      labels:
        app: logexporter
    spec:
      containers:
      - name: logexporter-test
        image: gcr.io/google-containers/logexporter:v0.1.0
        env:
        - name: NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        command:
        - logexporter
        - --node-name=$(NODE_NAME)
        - --cloud-provider={{.CloudProvider}}
        - --gcs-path={{.GCSPath}}
        - --gcloud-auth-file-path=/etc/service-account/service-account.json
        - --enable-hollow-node-logs={{.EnableHollowNodeLogs}}
        - --sleep-duration=24h
        - --alsologtostderr
        volumeMounts:
        - mountPath: /etc/service-account
          name: service
          readOnly: true
        - mountPath: /var/log
          name: varlog
          readOnly: true
        - mountPath: /workspace/etc
          name: hostetc
          readOnly: true
        resources:
          requests:
            cpu: 10m
            memory: 10Mi
      volumes:
      - name: service
        secret:
          secretName: google-service-account
      - name: varlog
        hostPath:
          path: /var/log
      - name: hostetc
        hostPath:
          path: /etc
@@ -43,6 +43,9 @@ CLUSTER_IP_RANGE=${CLUSTER_IP_RANGE:-10.244.0.0/16}
|
||||
|
||||
SWIFT_SERVER_URL=${SWIFT_SERVER_URL:-}
|
||||
|
||||
# The name of the object store container to use
|
||||
SWIFT_OBJECT_STORE=${SWIFT_OBJECT_STORE:-kubernetes}
|
||||
|
||||
# Flag indicates if new image must be created. If 'false' then image with IMAGE_ID will be used.
|
||||
# If 'true' then new image will be created from file config-image.sh
|
||||
CREATE_IMAGE=${CREATE_IMAGE:-true} # use "true" for devstack
|
||||
|
@@ -108,7 +108,7 @@ function create-stack() {
|
||||
# ROOT
|
||||
# KUBERNETES_RELEASE_TAR
|
||||
function upload-resources() {
|
||||
swift post kubernetes --read-acl '.r:*,.rlistings'
|
||||
swift post ${SWIFT_OBJECT_STORE} --read-acl '.r:*,.rlistings'
|
||||
|
||||
locations=(
|
||||
"${ROOT}/../../_output/release-tars/${KUBERNETES_RELEASE_TAR}"
|
||||
@@ -119,11 +119,11 @@ function upload-resources() {
|
||||
RELEASE_TAR_PATH=$(dirname ${RELEASE_TAR_LOCATION})
|
||||
|
||||
echo "[INFO] Uploading ${KUBERNETES_RELEASE_TAR}"
|
||||
swift upload kubernetes ${RELEASE_TAR_PATH}/${KUBERNETES_RELEASE_TAR} \
|
||||
swift upload ${SWIFT_OBJECT_STORE} ${RELEASE_TAR_PATH}/${KUBERNETES_RELEASE_TAR} \
|
||||
--object-name kubernetes-server.tar.gz
|
||||
|
||||
echo "[INFO] Uploading kubernetes-salt.tar.gz"
|
||||
swift upload kubernetes ${RELEASE_TAR_PATH}/kubernetes-salt.tar.gz \
|
||||
swift upload ${SWIFT_OBJECT_STORE} ${RELEASE_TAR_PATH}/kubernetes-salt.tar.gz \
|
||||
--object-name kubernetes-salt.tar.gz
|
||||
}
|
||||
|
||||
@@ -196,7 +196,7 @@ function run-heat-script() {
|
||||
fi
|
||||
SWIFT_SERVER_URL=$(openstack catalog show object-store --format value | egrep -o "$rgx" | cut -d" " -f2 | head -n 1)
|
||||
fi
|
||||
local swift_repo_url="${SWIFT_SERVER_URL}/kubernetes"
|
||||
local swift_repo_url="${SWIFT_SERVER_URL}/${SWIFT_OBJECT_STORE}"
|
||||
|
||||
if [ $CREATE_IMAGE = true ]; then
|
||||
echo "[INFO] Retrieve new image ID"
|
||||
|
62
cluster/pre-existing/util.sh
Normal file
@@ -0,0 +1,62 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Kubernetes Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# A library of helper functions for landing kubemark containers on a
# pre-existing Kubernetes master. See test/kubemark/pre-existing/README.md
# for more details on using a pre-existing provider.
|
||||
|
||||
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
|
||||
|
||||
source "${KUBE_ROOT}/cluster/common.sh"
|
||||
source "${KUBE_ROOT}/hack/lib/util.sh"
|
||||
|
||||
function detect-project() {
|
||||
if [[ -z "${MASTER_IP:-}" ]]; then
|
||||
echo "Set 'MASTER_IP' to the instance assigned to be the Kubernetes master" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${PROJECT:-}" ]]; then
|
||||
echo "Set 'PROJECT' to the name of the container project: $CONTAINER_REGISTRY/$PROJECT/kubemark" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z "${SERVICE_CLUSTER_IP_RANGE:-}" ]]; then
|
||||
cluster_range=$(echo "${MASTER_IP}" | awk -F '.' '{printf("%d.%d.%d.0", $1, $2, $3)}')
|
||||
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-$cluster_range/16}"
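# Illustrative example (not part of the original file): with MASTER_IP=10.240.0.2
# the awk above produces cluster_range=10.240.0.0, so the default becomes
# SERVICE_CLUSTER_IP_RANGE=10.240.0.0/16.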
|
||||
fi
|
||||
}
|
||||
|
||||
function create-certs {
|
||||
rm /tmp/kubeconfig
|
||||
|
||||
execute-cmd-on-pre-existing-master-with-retries "sudo cat /etc/kubernetes/admin.conf" > /tmp/kubeconfig
|
||||
CA_CERT_BASE64=$(cat /tmp/kubeconfig | grep certificate-authority | awk '{print $2}' | head -n 1)
|
||||
KUBELET_CERT_BASE64=$(cat /tmp/kubeconfig | grep client-certificate-data | awk '{print $2}' | head -n 1)
|
||||
KUBELET_KEY_BASE64=$(cat /tmp/kubeconfig | grep client-key-data | awk '{print $2}' | head -n 1)
|
||||
|
||||
# Local kubeconfig.kubemark vars
|
||||
KUBECFG_CERT_BASE64="${KUBELET_CERT_BASE64}"
|
||||
KUBECFG_KEY_BASE64="${KUBELET_KEY_BASE64}"
|
||||
|
||||
# The pre-existing Kubernetes master already has these setup
|
||||
# Set these vars but don't use them
|
||||
CA_KEY_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
MASTER_CERT_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
MASTER_KEY_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
KUBEAPISERVER_CERT_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
KUBEAPISERVER_KEY_BASE64=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
|
||||
}
|
@@ -35,4 +35,3 @@ reviewers:
|
||||
- k82cn
|
||||
- caseydavenport
|
||||
- johscheuer
|
||||
- rjnagal
|
||||
|
@@ -57,7 +57,7 @@ if [[ "${KUBERNETES_PROVIDER:-}" == "gce" ]]; then
|
||||
# In multizone mode we need to add instances for all nodes in the region.
|
||||
if [[ "${MULTIZONE:-}" == "true" ]]; then
|
||||
EXPECTED_NUM_NODES=$(gcloud -q compute instances list --project="${PROJECT}" --format=[no-heading] --regexp="${NODE_INSTANCE_PREFIX}.*" \
|
||||
--zones=$(gcloud -q compute zones list --project="${PROJECT}" --filter=region=${REGION} --format=[no-heading]\(name\) | tr "\n" "," | sed "s/,$//") | wc -l)
|
||||
--zones=$(gcloud -q compute zones list --project="${PROJECT}" --filter=region=${REGION} --format=csv[no-heading]\(name\) | tr "\n" "," | sed "s/,$//") | wc -l)
|
||||
echo "Computing number of nodes, NODE_INSTANCE_PREFIX=${NODE_INSTANCE_PREFIX}, REGION=${REGION}, EXPECTED_NUM_NODES=${EXPECTED_NUM_NODES}"
|
||||
fi
|
||||
fi
|
||||
|
@@ -16,8 +16,6 @@ go_library(
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions:go_default_library",
|
||||
"//pkg/client/leaderelection:go_default_library",
|
||||
"//pkg/client/leaderelection/resourcelock:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/controller:go_default_library",
|
||||
"//pkg/controller/cloud:go_default_library",
|
||||
@@ -32,9 +30,12 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/leaderelection:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
],
|
||||
)
|
||||
|
@@ -30,16 +30,17 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/server/healthz"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/leaderelection"
|
||||
"k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/kubernetes/cmd/cloud-controller-manager/app/options"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
|
||||
"k8s.io/kubernetes/pkg/client/leaderelection"
|
||||
"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
nodecontroller "k8s.io/kubernetes/pkg/controller/cloud"
|
||||
@@ -102,7 +103,7 @@ func Run(s *options.CloudControllerManagerServer, cloud cloudprovider.Interface)
|
||||
if err != nil {
|
||||
glog.Fatalf("Invalid API configuration: %v", err)
|
||||
}
|
||||
leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
|
||||
leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
|
||||
|
||||
// Start the external controller manager server
|
||||
go func() {
|
||||
@@ -253,9 +254,8 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc
|
||||
|
||||
// If apiserver is not running we should wait for some time and fail only then. This is particularly
|
||||
// important when we start apiserver and controller manager at the same time.
|
||||
var versionStrings []string
|
||||
err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
|
||||
if versionStrings, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
|
||||
if _, err = restclient.ServerAPIVersions(kubeconfig); err == nil {
|
||||
return true, nil
|
||||
}
|
||||
glog.Errorf("Failed to get api versions from server: %v", err)
|
||||
|
@@ -13,7 +13,7 @@ go_library(
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/client/leaderelection:go_default_library",
|
||||
"//pkg/client/leaderelectionconfig:go_default_library",
|
||||
"//pkg/features:go_default_library",
|
||||
"//pkg/master/ports:go_default_library",
|
||||
"//vendor/github.com/spf13/pflag:go_default_library",
|
||||
|
@@ -22,7 +22,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/apis/componentconfig"
|
||||
"k8s.io/kubernetes/pkg/client/leaderelection"
|
||||
"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
|
||||
// add the kubernetes feature gates
|
||||
@@ -56,7 +56,7 @@ func NewCloudControllerManagerServer() *CloudControllerManagerServer {
|
||||
ContentType: "application/vnd.kubernetes.protobuf",
|
||||
KubeAPIQPS: 20.0,
|
||||
KubeAPIBurst: 30,
|
||||
LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
|
||||
LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(),
|
||||
ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second},
|
||||
},
|
||||
NodeStatusUpdateFrequency: metav1.Duration{Duration: 5 * time.Minute},
|
||||
@@ -90,7 +90,7 @@ func (s *CloudControllerManagerServer) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
|
||||
fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.")
|
||||
|
||||
leaderelection.BindFlags(&s.LeaderElection, fs)
|
||||
leaderelectionconfig.BindFlags(&s.LeaderElection, fs)
|
||||
|
||||
utilfeature.DefaultFeatureGate.AddFlag(fs)
|
||||
}
|
||||
|
@@ -19,6 +19,7 @@ go_library(
|
||||
srcs = ["gen_kube_docs.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/cloud-controller-manager/app:go_default_library",
|
||||
"//cmd/genutils:go_default_library",
|
||||
"//cmd/kube-apiserver/app:go_default_library",
|
||||
"//cmd/kube-controller-manager/app:go_default_library",
|
||||
|
@@ -21,6 +21,7 @@ import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra/doc"
|
||||
ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app"
|
||||
"k8s.io/kubernetes/cmd/genutils"
|
||||
apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app"
|
||||
cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app"
|
||||
@@ -56,6 +57,10 @@ func main() {
|
||||
// generate docs for kube-controller-manager
|
||||
controllermanager := cmapp.NewControllerManagerCommand()
|
||||
doc.GenMarkdownTree(controllermanager, outDir)
|
||||
case "cloud-controller-manager":
|
||||
// generate docs for cloud-controller-manager
|
||||
cloudcontrollermanager := ccmapp.NewCloudControllerManagerCommand()
|
||||
doc.GenMarkdownTree(cloudcontrollermanager, outDir)
|
||||
case "kube-proxy":
|
||||
// generate docs for kube-proxy
|
||||
proxy := proxyapp.NewProxyCommand()
|
||||
|
@@ -19,6 +19,7 @@ go_library(
|
||||
srcs = ["gen_kube_man.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/cloud-controller-manager/app:go_default_library",
|
||||
"//cmd/genutils:go_default_library",
|
||||
"//cmd/kube-apiserver/app:go_default_library",
|
||||
"//cmd/kube-controller-manager/app:go_default_library",
|
||||
|
@@ -26,6 +26,7 @@ import (
|
||||
mangen "github.com/cpuguy83/go-md2man/md2man"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
ccmapp "k8s.io/kubernetes/cmd/cloud-controller-manager/app"
|
||||
"k8s.io/kubernetes/cmd/genutils"
|
||||
apiservapp "k8s.io/kubernetes/cmd/kube-apiserver/app"
|
||||
cmapp "k8s.io/kubernetes/cmd/kube-controller-manager/app"
|
||||
@@ -73,6 +74,13 @@ func main() {
|
||||
for _, c := range controllermanager.Commands() {
|
||||
genMarkdown(c, "kube-controller-manager", outDir)
|
||||
}
|
||||
case "cloud-controller-manager":
|
||||
//generate manpage for cloud-controller-manager
|
||||
controllermanager := ccmapp.NewCloudControllerManagerCommand()
|
||||
genMarkdown(controllermanager, "", outDir)
|
||||
for _, c := range controllermanager.Commands() {
|
||||
genMarkdown(c, "cloud-controller-manager", outDir)
|
||||
}
|
||||
case "kube-proxy":
|
||||
// generate manpage for kube-proxy
|
||||
proxy := proxyapp.NewProxyCommand()
|
||||
|
@@ -39,7 +39,7 @@ go_library(
|
||||
"//pkg/kubeapiserver/options:go_default_library",
|
||||
"//pkg/kubeapiserver/server:go_default_library",
|
||||
"//pkg/master:go_default_library",
|
||||
"//pkg/master/thirdparty:go_default_library",
|
||||
"//pkg/master/controller/crdregistration:go_default_library",
|
||||
"//pkg/master/tunneler:go_default_library",
|
||||
"//pkg/quota/install:go_default_library",
|
||||
"//pkg/registry/cachesize:go_default_library",
|
||||
@@ -89,7 +89,6 @@ go_library(
|
||||
"//vendor/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/registry/generic:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
|
||||
@@ -119,6 +118,7 @@ filegroup(
|
||||
":package-srcs",
|
||||
"//cmd/kube-apiserver/app/options:all-srcs",
|
||||
"//cmd/kube-apiserver/app/preflight:all-srcs",
|
||||
"//cmd/kube-apiserver/app/testing:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
@@ -41,8 +41,7 @@ import (
|
||||
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion"
|
||||
"k8s.io/kube-aggregator/pkg/controllers/autoregister"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
|
||||
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
|
||||
"k8s.io/kubernetes/pkg/master/thirdparty"
|
||||
"k8s.io/kubernetes/pkg/master/controller/crdregistration"
|
||||
)
|
||||
|
||||
func createAggregatorConfig(kubeAPIServerConfig genericapiserver.Config, commandOptions *options.ServerRunOptions, externalInformers kubeexternalinformers.SharedInformerFactory, serviceResolver aggregatorapiserver.ServiceResolver, proxyTransport *http.Transport) (*aggregatorapiserver.Config, error) {
|
||||
@@ -85,7 +84,7 @@ func createAggregatorConfig(kubeAPIServerConfig genericapiserver.Config, command
|
||||
return aggregatorConfig, nil
|
||||
}
|
||||
|
||||
func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, kubeInformers informers.SharedInformerFactory, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) {
|
||||
func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) {
|
||||
aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -98,14 +97,13 @@ func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delega
|
||||
}
|
||||
autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient)
|
||||
apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController)
|
||||
tprRegistrationController := thirdparty.NewAutoRegistrationController(
|
||||
kubeInformers.Extensions().InternalVersion().ThirdPartyResources(),
|
||||
crdRegistrationController := crdregistration.NewAutoRegistrationController(
|
||||
apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(),
|
||||
autoRegistrationController)
|
||||
|
||||
aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error {
|
||||
go autoRegistrationController.Run(5, context.StopCh)
|
||||
go tprRegistrationController.Run(5, context.StopCh)
|
||||
go crdRegistrationController.Run(5, context.StopCh)
|
||||
return nil
|
||||
})
|
||||
aggregatorServer.GenericAPIServer.AddHealthzChecks(healthz.NamedCheck("autoregister-completion", func(r *http.Request) error {
|
||||
|
@@ -143,7 +143,7 @@ func (s *ServerRunOptions) AddFlags(fs *pflag.FlagSet) {
|
||||
"Amount of time to retain events.")
|
||||
|
||||
fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged,
|
||||
"If true, allow privileged containers.")
|
||||
"If true, allow privileged containers. [default=false]")
|
||||
|
||||
fs.BoolVar(&s.EnableLogsHandler, "enable-logs-handler", s.EnableLogsHandler,
|
||||
"If true, install a /logs handler for the apiserver logs.")
|
||||
|
@@ -63,6 +63,9 @@ func (options *ServerRunOptions) Validate() []error {
|
||||
if errs := options.Authentication.Validate(); len(errs) > 0 {
|
||||
errors = append(errors, errs...)
|
||||
}
|
||||
if errs := options.Audit.Validate(); len(errs) > 0 {
|
||||
errors = append(errors, errs...)
|
||||
}
|
||||
if errs := options.InsecureServing.Validate("insecure-port"); len(errs) > 0 {
|
||||
errors = append(errors, errs...)
|
||||
}
|
||||
|
@@ -47,7 +47,6 @@ import (
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/authentication/authenticator"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
genericregistry "k8s.io/apiserver/pkg/registry/generic"
|
||||
genericapiserver "k8s.io/apiserver/pkg/server"
|
||||
"k8s.io/apiserver/pkg/server/filters"
|
||||
"k8s.io/apiserver/pkg/server/options/encryptionconfig"
|
||||
@@ -111,30 +110,39 @@ func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error {
|
||||
// To help debugging, immediately log version
|
||||
glog.Infof("Version: %+v", version.Get())
|
||||
|
||||
nodeTunneler, proxyTransport, err := CreateNodeDialer(runOptions)
|
||||
server, err := CreateServerChain(runOptions, stopCh)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return server.PrepareRun().Run(stopCh)
|
||||
}
|
||||
|
||||
// CreateServerChain creates the apiservers connected via delegation.
|
||||
func CreateServerChain(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) (*genericapiserver.GenericAPIServer, error) {
|
||||
nodeTunneler, proxyTransport, err := CreateNodeDialer(runOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kubeAPIServerConfig, sharedInformers, versionedInformers, insecureServingOptions, serviceResolver, err := CreateKubeAPIServerConfig(runOptions, nodeTunneler, proxyTransport)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TPRs are enabled and not yet beta; since CRDs are their successor, they fall under the same enablement rule.
|
||||
// If additional API servers are added, they should be gated.
|
||||
apiExtensionsConfig, err := createAPIExtensionsConfig(*kubeAPIServerConfig.GenericConfig, runOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
apiExtensionsServer, err := createAPIExtensionsServer(apiExtensionsConfig, genericapiserver.EmptyDelegate)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kubeAPIServer, err := CreateKubeAPIServer(kubeAPIServerConfig, apiExtensionsServer.GenericAPIServer, sharedInformers, apiExtensionsConfig.CRDRESTOptionsGetter)
|
||||
kubeAPIServer, err := CreateKubeAPIServer(kubeAPIServerConfig, apiExtensionsServer.GenericAPIServer, sharedInformers)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// if we're starting up a hacked up version of this API server for a weird test case,
|
||||
@@ -143,11 +151,11 @@ func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error {
|
||||
if insecureServingOptions != nil {
|
||||
insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(kubeAPIServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig)
|
||||
if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, stopCh); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return kubeAPIServer.GenericAPIServer.PrepareRun().Run(stopCh)
|
||||
return kubeAPIServer.GenericAPIServer, nil
|
||||
}
|
||||
|
||||
// otherwise go down the normal path of standing the aggregator up in front of the API server
|
||||
@@ -157,29 +165,29 @@ func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error {
|
||||
// aggregator comes last in the chain
|
||||
aggregatorConfig, err := createAggregatorConfig(*kubeAPIServerConfig.GenericConfig, runOptions, versionedInformers, serviceResolver, proxyTransport)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
aggregatorConfig.ProxyTransport = proxyTransport
|
||||
aggregatorConfig.ServiceResolver = serviceResolver
|
||||
aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, sharedInformers, apiExtensionsServer.Informers)
|
||||
aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, apiExtensionsServer.Informers)
|
||||
if err != nil {
|
||||
// we don't need special handling for innerStopCh because the aggregator server doesn't create any go routines
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if insecureServingOptions != nil {
|
||||
insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(aggregatorServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig)
|
||||
if err := kubeserver.NonBlockingRun(insecureServingOptions, insecureHandlerChain, stopCh); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return aggregatorServer.GenericAPIServer.PrepareRun().Run(stopCh)
|
||||
return aggregatorServer.GenericAPIServer, nil
|
||||
}
|
||||
|
||||
// CreateKubeAPIServer creates and wires a workable kube-apiserver
|
||||
func CreateKubeAPIServer(kubeAPIServerConfig *master.Config, delegateAPIServer genericapiserver.DelegationTarget, sharedInformers informers.SharedInformerFactory, crdRESTOptionsGetter genericregistry.RESTOptionsGetter) (*master.Master, error) {
|
||||
kubeAPIServer, err := kubeAPIServerConfig.Complete().New(delegateAPIServer, crdRESTOptionsGetter)
|
||||
func CreateKubeAPIServer(kubeAPIServerConfig *master.Config, delegateAPIServer genericapiserver.DelegationTarget, sharedInformers informers.SharedInformerFactory) (*master.Master, error) {
|
||||
kubeAPIServer, err := kubeAPIServerConfig.Complete().New(delegateAPIServer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -256,8 +264,10 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele
|
||||
return nil, nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.ServerList}.CheckEtcdServers); err != nil {
|
||||
return nil, nil, nil, nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err)
|
||||
if _, port, err := net.SplitHostPort(s.Etcd.StorageConfig.ServerList[0]); err == nil && port != "0" && len(port) != 0 {
|
||||
if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.ServerList}.CheckEtcdServers); err != nil {
|
||||
return nil, nil, nil, nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
capabilities.Initialize(capabilities.Capabilities{
|
||||
|
56
cmd/kube-apiserver/app/testing/BUILD
Normal file
@@ -0,0 +1,56 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["server_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//vendor/k8s.io/api/apps/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/api/networking/v1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
|
||||
"//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["testserver.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kube-apiserver/app:go_default_library",
|
||||
"//cmd/kube-apiserver/app/options:go_default_library",
|
||||
"//pkg/api:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/storage/etcd/testing:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
231
cmd/kube-apiserver/app/testing/server_test.go
Normal file
@@ -0,0 +1,231 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
appsv1beta1 "k8s.io/api/apps/v1beta1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
|
||||
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
config, tearDown := StartTestServerOrDie(t)
|
||||
defer tearDown()
|
||||
|
||||
client, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// test whether the server is really healthy after /healthz told us so
|
||||
t.Logf("Creating Deployment directly after being healthy")
|
||||
var replicas int32 = 1
|
||||
_, err = client.AppsV1beta1().Deployments("default").Create(&appsv1beta1.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Deployment",
|
||||
APIVersion: "apps/v1beta1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "test",
|
||||
},
|
||||
Spec: appsv1beta1.DeploymentSpec{
|
||||
Replicas: &replicas,
|
||||
Strategy: appsv1beta1.DeploymentStrategy{
|
||||
Type: appsv1beta1.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
Template: corev1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "foo",
|
||||
Image: "foo",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create deployment: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCRDShadowGroup(t *testing.T) {
|
||||
config, tearDown := StartTestServerOrDie(t)
|
||||
defer tearDown()
|
||||
|
||||
kubeclient, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
apiextensionsclient, err := apiextensionsclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Creating a NetworkPolicy")
|
||||
nwPolicy, err := kubeclient.NetworkingV1().NetworkPolicies("default").Create(&networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "abc", Namespace: metav1.NamespaceDefault},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create NetworkPolicy: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Trying to shadow networking group")
|
||||
crd := &apiextensionsv1beta1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foos." + networkingv1.GroupName,
|
||||
},
|
||||
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
|
||||
Group: networkingv1.GroupName,
|
||||
Version: networkingv1.SchemeGroupVersion.Version,
|
||||
Scope: apiextensionsv1beta1.ClusterScoped,
|
||||
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
|
||||
Plural: "foos",
|
||||
Kind: "Foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
|
||||
t.Fatalf("Failed to create networking group CRD: %v", err)
|
||||
}
|
||||
if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil {
|
||||
t.Fatalf("Failed to establish networking group CRD: %v", err)
|
||||
}
|
||||
// wait to give aggregator time to update
|
||||
time.Sleep(2 * time.Second)
|
||||
|
||||
t.Logf("Checking that we still see the NetworkPolicy")
|
||||
_, err = kubeclient.NetworkingV1().NetworkPolicies(nwPolicy.Namespace).Get(nwPolicy.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get NetworkPolicy: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Checking that crd resource does not show up in networking group")
|
||||
found, err := crdExistsInDiscovery(apiextensionsclient, crd)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected discovery error: %v", err)
|
||||
}
|
||||
if found {
|
||||
t.Errorf("CRD resource shows up in discovery, but shouldn't.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestCRD(t *testing.T) {
|
||||
config, tearDown := StartTestServerOrDie(t)
|
||||
defer tearDown()
|
||||
|
||||
apiextensionsclient, err := apiextensionsclientset.NewForConfig(config)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Trying to create a custom resource without conflict")
|
||||
crd := &apiextensionsv1beta1.CustomResourceDefinition{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foos.cr.bar.com",
|
||||
},
|
||||
Spec: apiextensionsv1beta1.CustomResourceDefinitionSpec{
|
||||
Group: "cr.bar.com",
|
||||
Version: "v1",
|
||||
Scope: apiextensionsv1beta1.NamespaceScoped,
|
||||
Names: apiextensionsv1beta1.CustomResourceDefinitionNames{
|
||||
Plural: "foos",
|
||||
Kind: "Foo",
|
||||
},
|
||||
},
|
||||
}
|
||||
if _, err = apiextensionsclient.ApiextensionsV1beta1().CustomResourceDefinitions().Create(crd); err != nil {
|
||||
t.Fatalf("Failed to create foos.cr.bar.com CRD; %v", err)
|
||||
}
|
||||
if err := waitForEstablishedCRD(apiextensionsclient, crd.Name); err != nil {
|
||||
t.Fatalf("Failed to establish foos.cr.bar.com CRD: %v", err)
|
||||
}
|
||||
if err := wait.PollImmediate(500*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
return crdExistsInDiscovery(apiextensionsclient, crd)
|
||||
}); err != nil {
|
||||
t.Fatalf("Failed to see foos.cr.bar.com in discovery: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Trying to access foos.cr.bar.com with dynamic client")
|
||||
barComConfig := *config
|
||||
barComConfig.GroupVersion = &schema.GroupVersion{Group: "cr.bar.com", Version: "v1"}
|
||||
barComConfig.APIPath = "/apis"
|
||||
barComClient, err := dynamic.NewClient(&barComConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
_, err = barComClient.Resource(&metav1.APIResource{Name: "foos", Namespaced: true}, "default").List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Failed to list foos.cr.bar.com instances: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForEstablishedCRD(client apiextensionsclientset.Interface, name string) error {
|
||||
return wait.PollImmediate(500*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
crd, err := client.ApiextensionsV1beta1().CustomResourceDefinitions().Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, cond := range crd.Status.Conditions {
|
||||
switch cond.Type {
|
||||
case apiextensionsv1beta1.Established:
|
||||
if cond.Status == apiextensionsv1beta1.ConditionTrue {
|
||||
return true, err
|
||||
}
|
||||
case apiextensionsv1beta1.NamesAccepted:
|
||||
if cond.Status == apiextensionsv1beta1.ConditionFalse {
|
||||
fmt.Printf("Name conflict: %v\n", cond.Reason)
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
}
|
||||
|
||||
func crdExistsInDiscovery(client apiextensionsclientset.Interface, crd *apiextensionsv1beta1.CustomResourceDefinition) (bool, error) {
|
||||
resourceList, err := client.Discovery().ServerResourcesForGroupVersion(crd.Spec.Group + "/" + crd.Spec.Version)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
for _, resource := range resourceList.APIResources {
|
||||
if resource.Name == crd.Spec.Names.Plural {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
157
cmd/kube-apiserver/app/testing/testserver.go
Normal file
@@ -0,0 +1,157 @@
|
||||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package testing
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
etcdtesting "k8s.io/apiserver/pkg/storage/etcd/testing"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app"
|
||||
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
// TearDownFunc is to be called to tear down a test server.
|
||||
type TearDownFunc func()
|
||||
|
||||
// StartTestServer starts a etcd server and kube-apiserver. A rest client config and a tear-down func
|
||||
// are returned.
|
||||
//
|
||||
// Note: we return a tear-down func instead of a stop channel because the latter would leak
// temporary files: Golang testing's call to os.Exit does not give a stop-channel goroutine
// enough time to remove the temporary files.
|
||||
func StartTestServer(t *testing.T) (result *restclient.Config, tearDownForCaller TearDownFunc, err error) {
|
||||
var tmpDir string
|
||||
var etcdServer *etcdtesting.EtcdTestServer
|
||||
stopCh := make(chan struct{})
|
||||
tearDown := func() {
|
||||
close(stopCh)
|
||||
if etcdServer != nil {
|
||||
etcdServer.Terminate(t)
|
||||
}
|
||||
if len(tmpDir) != 0 {
|
||||
os.RemoveAll(tmpDir)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
if tearDownForCaller == nil {
|
||||
tearDown()
|
||||
}
|
||||
}()
|
||||
|
||||
t.Logf("Starting etcd...")
|
||||
etcdServer, storageConfig := etcdtesting.NewUnsecuredEtcd3TestClientServer(t, api.Scheme)
|
||||
|
||||
tmpDir, err = ioutil.TempDir("", "kubernetes-kube-apiserver")
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Failed to create temp dir: %v", err)
|
||||
}
|
||||
|
||||
s := options.NewServerRunOptions()
|
||||
s.InsecureServing.BindPort = 0
|
||||
s.SecureServing.BindPort = freePort()
|
||||
s.SecureServing.ServerCert.CertDirectory = tmpDir
|
||||
s.ServiceClusterIPRange.IP = net.IPv4(10, 0, 0, 0)
|
||||
s.ServiceClusterIPRange.Mask = net.CIDRMask(16, 32)
|
||||
s.Etcd.StorageConfig = *storageConfig
|
||||
s.Etcd.DefaultStorageMediaType = "application/json"
|
||||
s.Admission.PluginNames = strings.Split("Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds", ",")
|
||||
|
||||
t.Logf("Starting kube-apiserver...")
|
||||
runErrCh := make(chan error, 1)
|
||||
server, err := app.CreateServerChain(s, stopCh)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Failed to create server chain: %v", err)
|
||||
}
|
||||
go func(stopCh <-chan struct{}) {
|
||||
if err := server.PrepareRun().Run(stopCh); err != nil {
|
||||
t.Logf("kube-apiserver exited uncleanly: %v", err)
|
||||
runErrCh <- err
|
||||
}
|
||||
}(stopCh)
|
||||
|
||||
t.Logf("Waiting for /healthz to be ok...")
|
||||
client, err := kubernetes.NewForConfig(server.LoopbackClientConfig)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Failed to create a client: %v", err)
|
||||
}
|
||||
err = wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
|
||||
select {
|
||||
case err := <-runErrCh:
|
||||
return false, err
|
||||
default:
|
||||
}
|
||||
|
||||
result := client.CoreV1Client.RESTClient().Get().AbsPath("/healthz").Do()
|
||||
status := 0
|
||||
result.StatusCode(&status)
|
||||
if status == 200 {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Failed to wait for /healthz to return ok: %v", err)
|
||||
}
|
||||
|
||||
// from here the caller must call tearDown
|
||||
return server.LoopbackClientConfig, tearDown, nil
|
||||
}
|
||||
|
||||
// StartTestServerOrDie calls StartTestServer with up to 5 retries on bind error and dies with
|
||||
// t.Fatal if it does not succeed.
|
||||
func StartTestServerOrDie(t *testing.T) (*restclient.Config, TearDownFunc) {
|
||||
// retry test because the bind might fail due to a race with another process
|
||||
// binding to the port. We cannot listen to :0 (then the kernel would give us
|
||||
// a port which is free for sure), so we need this workaround.
|
||||
for retry := 0; retry < 5 && !t.Failed(); retry++ {
|
||||
config, td, err := StartTestServer(t)
|
||||
if err == nil {
|
||||
return config, td
|
||||
}
|
||||
if err != nil && !strings.Contains(err.Error(), "bind") {
|
||||
break
|
||||
}
|
||||
t.Logf("Bind error, retrying...")
|
||||
}
|
||||
|
||||
t.Fatalf("Failed to launch server")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func freePort() int {
|
||||
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
l, err := net.ListenTCP("tcp", addr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer l.Close()
|
||||
return l.Addr().(*net.TCPAddr).Port
|
||||
}
|
@@ -5,7 +5,6 @@ approvers:
|
||||
reviewers:
|
||||
- '249043822'
|
||||
- a-robinson
|
||||
- bprashanth
|
||||
- brendandburns
|
||||
- caesarxuchao
|
||||
- cjcullen
|
||||
|
@@ -30,8 +30,6 @@ go_library(
|
||||
"//pkg/apis/componentconfig:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/informers/informers_generated/externalversions:go_default_library",
|
||||
"//pkg/client/leaderelection:go_default_library",
|
||||
"//pkg/client/leaderelection/resourcelock:go_default_library",
|
||||
"//pkg/cloudprovider:go_default_library",
|
||||
"//pkg/cloudprovider/providers:go_default_library",
|
||||
"//pkg/cloudprovider/providers/aws:go_default_library",
|
||||
@@ -105,9 +103,12 @@ go_library(
|
||||
"//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//vendor/k8s.io/client-go/discovery:go_default_library",
|
||||
"//vendor/k8s.io/client-go/dynamic:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/client-go/rest:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/leaderelection:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
"//vendor/k8s.io/client-go/util/cert:go_default_library",
|
||||
"//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1:go_default_library",
|
||||
|
@@ -40,18 +40,19 @@ import (

clientv1 "k8s.io/api/core/v1"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
certutil "k8s.io/client-go/util/cert"

"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
"k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
@@ -127,7 +128,7 @@ func Run(s *options.CMServer) error {
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))

go func() {
mux := http.NewServeMux()
@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/controller/deployment"
replicaset "k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/controller/replicaset"
)

func startDaemonSetController(ctx ControllerContext) (bool, error) {
@@ -13,7 +13,7 @@ go_library(
tags = ["automanaged"],
deps = [
"//pkg/apis/componentconfig:go_default_library",
"//pkg/client/leaderelection:go_default_library",
"//pkg/client/leaderelectionconfig:go_default_library",
"//pkg/controller/garbagecollector:go_default_library",
"//pkg/features:go_default_library",
"//pkg/master/ports:go_default_library",
@@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
"k8s.io/kubernetes/pkg/master/ports"
@@ -106,7 +106,7 @@ func NewCMServer() *CMServer {
ContentType: "application/vnd.kubernetes.protobuf",
KubeAPIQPS: 20.0,
KubeAPIBurst: 30,
LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
LeaderElection: leaderelectionconfig.DefaultLeaderElectionConfiguration(),
ControllerStartInterval: metav1.Duration{Duration: 0 * time.Second},
EnableGarbageCollector: true,
ConcurrentGCSyncs: 20,
@@ -225,9 +225,9 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled
fs.BoolVar(&s.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.")
fs.DurationVar(&s.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.")
fs.BoolVar(&s.EnableTaintManager, "enable-taint-manager", s.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.")
fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizonal pod autoscaler.")
fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.")

leaderelection.BindFlags(&s.LeaderElection, fs)
leaderelectionconfig.BindFlags(&s.LeaderElection, fs)

utilfeature.DefaultFeatureGate.AddFlag(fs)
}
@@ -23,6 +23,7 @@ go_library(
"//pkg/client/informers/informers_generated/internalversion:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/kubelet/qos:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/proxy:go_default_library",
"//pkg/proxy/config:go_default_library",
"//pkg/proxy/healthcheck:go_default_library",
@@ -72,10 +73,13 @@ go_test(
deps = [
"//pkg/api:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/componentconfig/v1alpha1:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/configz:go_default_library",
"//pkg/util/iptables:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
],
)
@@ -53,6 +53,7 @@ import (
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/proxy"
proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
"k8s.io/kubernetes/pkg/proxy/healthcheck"
@@ -129,6 +130,7 @@ func AddFlags(options *Options, fs *pflag.FlagSet) {
fs.StringVar(&options.master, "master", options.master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.Int32Var(&options.healthzPort, "healthz-port", options.healthzPort, "The port to bind the health check server. Use 0 to disable.")
fs.Var(componentconfig.IPVar{Val: &options.config.HealthzBindAddress}, "healthz-bind-address", "The IP address and port for the health check server to serve on (set to 0.0.0.0 for all interfaces)")
fs.Var(componentconfig.IPVar{Val: &options.config.MetricsBindAddress}, "metrics-bind-address", "The IP address and port for the metrics server to serve on (set to 0.0.0.0 for all interfaces)")
fs.Int32Var(options.config.OOMScoreAdj, "oom-score-adj", util.Int32PtrDerefOr(options.config.OOMScoreAdj, int32(qos.KubeProxyOOMScoreAdj)), "The oom-score-adj value for kube-proxy process. Values must be within the range [-1000, 1000]")
fs.StringVar(&options.config.ResourceContainer, "resource-container", options.config.ResourceContainer, "Absolute name of the resource-only container to create and run the Kube-proxy in (Default: /kube-proxy).")
fs.MarkDeprecated("resource-container", "This feature will be removed in a later release.")
@@ -166,7 +168,7 @@ func AddFlags(options *Options, fs *pflag.FlagSet) {
func NewOptions() (*Options, error) {
o := &Options{
config: new(componentconfig.KubeProxyConfiguration),
healthzPort: 10256,
healthzPort: ports.ProxyHealthzPort,
}

o.scheme = runtime.NewScheme()
@@ -447,7 +449,7 @@ func NewProxyServer(config *componentconfig.KubeProxyConfiguration, cleanupAndEx

// We omit creation of pretty much everything if we run in cleanup mode
if cleanupAndExit {
return &ProxyServer{IptInterface: iptInterface}, nil
return &ProxyServer{IptInterface: iptInterface, CleanupAndExit: cleanupAndExit}, nil
}

client, eventClient, err := createClients(config.ClientConnection, master)
@@ -627,7 +629,9 @@ func (s *ProxyServer) Run() error {
}
}

s.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.EventClient.Events("")})
if s.Broadcaster != nil && s.EventClient != nil {
s.Broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: s.EventClient.Events("")})
}

// Start up a healthz server if requested
if s.HealthzServer != nil {
@@ -17,6 +17,7 @@ limitations under the License.
package app

import (
"errors"
"fmt"
"reflect"
"runtime"
@@ -27,10 +28,13 @@ import (
"github.com/stretchr/testify/assert"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sRuntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/configz"
"k8s.io/kubernetes/pkg/util/iptables"
)
@@ -134,23 +138,67 @@ func Test_getProxyMode(t *testing.T) {
}
}

// This test verifies that Proxy Server does not crash when CleanupAndExit is true.
// TestNewOptionsFailures tests failure modes for NewOptions()
func TestNewOptionsFailures(t *testing.T) {

// Create a fake scheme builder that generates an error
errString := fmt.Sprintf("Simulated error")
genError := func(scheme *k8sRuntime.Scheme) error {
return errors.New(errString)
}
fakeSchemeBuilder := k8sRuntime.NewSchemeBuilder(genError)

simulatedErrorTest := func(target string) {
var addToScheme *func(s *k8sRuntime.Scheme) error
if target == "componentconfig" {
addToScheme = &componentconfig.AddToScheme
} else {
addToScheme = &v1alpha1.AddToScheme
}
restoreValue := *addToScheme
restore := func() {
*addToScheme = restoreValue
}
defer restore()
*addToScheme = fakeSchemeBuilder.AddToScheme
_, err := NewOptions()
assert.Error(t, err, fmt.Sprintf("Simulated error in component %s", target))
}

// Simulate errors in calls to AddToScheme()
faultTargets := []string{"componentconfig", "v1alpha1"}
for _, target := range faultTargets {
simulatedErrorTest(target)
}
}

// This test verifies that NewProxyServer does not crash when CleanupAndExit is true.
func TestProxyServerWithCleanupAndExit(t *testing.T) {
options, err := NewOptions()
if err != nil {
t.Fatal(err)
// Each bind address below is a separate test case
bindAddresses := []string{
"0.0.0.0",
"2001:db8::1",
}
for _, addr := range bindAddresses {
options, err := NewOptions()
if err != nil {
t.Fatalf("Unexpected error with address %s: %v", addr, err)
}

options.config = &componentconfig.KubeProxyConfiguration{
BindAddress: "0.0.0.0",
options.config = &componentconfig.KubeProxyConfiguration{
BindAddress: addr,
}
options.CleanupAndExit = true

proxyserver, err := NewProxyServer(options.config, options.CleanupAndExit, options.scheme, options.master)

assert.Nil(t, err, "unexpected error in NewProxyServer, addr: %s", addr)
assert.NotNil(t, proxyserver, "nil proxy server obj, addr: %s", addr)
assert.NotNil(t, proxyserver.IptInterface, "nil iptables intf, addr: %s", addr)

// Clean up config for next test case
configz.Delete("componentconfig")
}
options.CleanupAndExit = true

proxyserver, err := NewProxyServer(options.config, options.CleanupAndExit, options.scheme, options.master)

assert.Nil(t, err)
assert.NotNil(t, proxyserver)
assert.NotNil(t, proxyserver.IptInterface)
}

func TestGetConntrackMax(t *testing.T) {
@@ -211,16 +259,18 @@ func TestGetConntrackMax(t *testing.T) {
}
}

// TestLoadConfig tests proper operation of loadConfig()
func TestLoadConfig(t *testing.T) {
yaml := `apiVersion: componentconfig/v1alpha1
bindAddress: 9.8.7.6

yamlTemplate := `apiVersion: componentconfig/v1alpha1
bindAddress: %s
clientConnection:
acceptContentTypes: "abc"
burst: 100
contentType: content-type
kubeconfig: "/path/to/kubeconfig"
qps: 7
clusterCIDR: "1.2.3.0/24"
clusterCIDR: "%s"
configSyncPeriod: 15s
conntrack:
max: 4
@@ -229,7 +279,7 @@ conntrack:
tcpCloseWaitTimeout: 10s
tcpEstablishedTimeout: 20s
featureGates: "all"
healthzBindAddress: 1.2.3.4:12345
healthzBindAddress: "%s"
hostnameOverride: "foo"
iptables:
masqueradeAll: true
@@ -237,7 +287,7 @@ iptables:
minSyncPeriod: 10s
syncPeriod: 60s
kind: KubeProxyConfiguration
metricsBindAddress: 2.3.4.5:23456
metricsBindAddress: "%s"
mode: "iptables"
oomScoreAdj: 17
portRange: "2-7"
@@ -245,47 +295,104 @@ resourceContainer: /foo
udpTimeoutMilliseconds: 123ms
`

expected := &componentconfig.KubeProxyConfiguration{
BindAddress: "9.8.7.6",
ClientConnection: componentconfig.ClientConnectionConfiguration{
AcceptContentTypes: "abc",
Burst: 100,
ContentType: "content-type",
KubeConfigFile: "/path/to/kubeconfig",
QPS: 7,
testCases := []struct {
name string
bindAddress string
clusterCIDR string
healthzBindAddress string
metricsBindAddress string
}{
{
name: "IPv4 config",
bindAddress: "9.8.7.6",
clusterCIDR: "1.2.3.0/24",
healthzBindAddress: "1.2.3.4:12345",
metricsBindAddress: "2.3.4.5:23456",
},
ClusterCIDR: "1.2.3.0/24",
ConfigSyncPeriod: metav1.Duration{Duration: 15 * time.Second},
Conntrack: componentconfig.KubeProxyConntrackConfiguration{
Max: 4,
MaxPerCore: 2,
Min: 1,
TCPCloseWaitTimeout: metav1.Duration{Duration: 10 * time.Second},
TCPEstablishedTimeout: metav1.Duration{Duration: 20 * time.Second},
{
name: "IPv6 config",
bindAddress: "2001:db8::1",
clusterCIDR: "fd00:1::0/64",
healthzBindAddress: "[fd00:1::5]:12345",
metricsBindAddress: "[fd00:2::5]:23456",
},
FeatureGates: "all",
HealthzBindAddress: "1.2.3.4:12345",
HostnameOverride: "foo",
IPTables: componentconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
MasqueradeBit: util.Int32Ptr(17),
MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second},
SyncPeriod: metav1.Duration{Duration: 60 * time.Second},
},
MetricsBindAddress: "2.3.4.5:23456",
Mode: "iptables",
OOMScoreAdj: util.Int32Ptr(17),
PortRange: "2-7",
ResourceContainer: "/foo",
UDPIdleTimeout: metav1.Duration{Duration: 123 * time.Millisecond},
}

options, err := NewOptions()
assert.NoError(t, err)
for _, tc := range testCases {
expected := &componentconfig.KubeProxyConfiguration{
BindAddress: tc.bindAddress,
ClientConnection: componentconfig.ClientConnectionConfiguration{
AcceptContentTypes: "abc",
Burst: 100,
ContentType: "content-type",
KubeConfigFile: "/path/to/kubeconfig",
QPS: 7,
},
ClusterCIDR: tc.clusterCIDR,
ConfigSyncPeriod: metav1.Duration{Duration: 15 * time.Second},
Conntrack: componentconfig.KubeProxyConntrackConfiguration{
Max: 4,
MaxPerCore: 2,
Min: 1,
TCPCloseWaitTimeout: metav1.Duration{Duration: 10 * time.Second},
TCPEstablishedTimeout: metav1.Duration{Duration: 20 * time.Second},
},
FeatureGates: "all",
HealthzBindAddress: tc.healthzBindAddress,
HostnameOverride: "foo",
IPTables: componentconfig.KubeProxyIPTablesConfiguration{
MasqueradeAll: true,
MasqueradeBit: util.Int32Ptr(17),
MinSyncPeriod: metav1.Duration{Duration: 10 * time.Second},
SyncPeriod: metav1.Duration{Duration: 60 * time.Second},
},
MetricsBindAddress: tc.metricsBindAddress,
Mode: "iptables",
OOMScoreAdj: util.Int32Ptr(17),
PortRange: "2-7",
ResourceContainer: "/foo",
UDPIdleTimeout: metav1.Duration{Duration: 123 * time.Millisecond},
}

config, err := options.loadConfig([]byte(yaml))
assert.NoError(t, err)
if !reflect.DeepEqual(expected, config) {
t.Fatalf("unexpected config, diff = %s", diff.ObjectDiff(config, expected))
options, err := NewOptions()
assert.NoError(t, err, "unexpected error for %s: %v", tc.name, err)

yaml := fmt.Sprintf(
yamlTemplate, tc.bindAddress, tc.clusterCIDR,
tc.healthzBindAddress, tc.metricsBindAddress)
config, err := options.loadConfig([]byte(yaml))
assert.NoError(t, err, "unexpected error for %s: %v", tc.name, err)
if !reflect.DeepEqual(expected, config) {
t.Fatalf("unexpected config for %s test, diff = %s", tc.name, diff.ObjectDiff(config, expected))
}
}
}

// TestLoadConfigFailures tests failure modes for loadConfig()
func TestLoadConfigFailures(t *testing.T) {
testCases := []struct {
name string
config string
expErr string
}{
{
name: "Decode error test",
config: "Twas bryllyg, and ye slythy toves",
expErr: "could not find expected ':'",
},
{
name: "Bad config type test",
config: "kind: KubeSchedulerConfiguration",
expErr: "unexpected config type",
},
}
version := "apiVersion: componentconfig/v1alpha1"
for _, tc := range testCases {
options, _ := NewOptions()
config := fmt.Sprintf("%s\n%s", version, tc.config)
_, err := options.loadConfig([]byte(config))
if assert.Error(t, err, tc.name) {
assert.Contains(t, err.Error(), tc.expErr, tc.name)
}
}
}
@@ -12,3 +12,4 @@ reviewers:
- lukemarsden
- dmmcquay
- krousey
- timothysc
@@ -35,12 +35,13 @@ filegroup(
"//cmd/kubeadm/app/constants:all-srcs",
"//cmd/kubeadm/app/discovery:all-srcs",
"//cmd/kubeadm/app/images:all-srcs",
"//cmd/kubeadm/app/master:all-srcs",
"//cmd/kubeadm/app/node:all-srcs",
"//cmd/kubeadm/app/phases/addons:all-srcs",
"//cmd/kubeadm/app/phases/apiconfig:all-srcs",
"//cmd/kubeadm/app/phases/certs:all-srcs",
"//cmd/kubeadm/app/phases/controlplane:all-srcs",
"//cmd/kubeadm/app/phases/kubeconfig:all-srcs",
"//cmd/kubeadm/app/phases/selfhosting:all-srcs",
"//cmd/kubeadm/app/phases/token:all-srcs",
"//cmd/kubeadm/app/preflight:all-srcs",
"//cmd/kubeadm/app/util:all-srcs",
@@ -14,7 +14,6 @@ go_library(
"env.go",
"register.go",
"types.go",
"well_known_labels.go",
],
tags = ["automanaged"],
deps = [
@@ -25,15 +25,10 @@ import (

var GlobalEnvParams = SetEnvParams()

// TODO(phase1+) Move these parameters to the API group
// we need some params for testing etc, let's keep these hidden for now
func SetEnvParams() *EnvParams {

envParams := map[string]string{
"kubernetes_dir": "/etc/kubernetes",
"hyperkube_image": "",
"repo_prefix": "gcr.io/google_containers",
"etcd_image": "",
"kubernetes_dir": "/etc/kubernetes",
}

for k := range envParams {
@@ -43,9 +38,6 @@ func SetEnvParams() *EnvParams {
}

return &EnvParams{
KubernetesDir: path.Clean(envParams["kubernetes_dir"]),
HyperkubeImage: envParams["hyperkube_image"],
RepositoryPrefix: envParams["repo_prefix"],
EtcdImage: envParams["etcd_image"],
KubernetesDir: path.Clean(envParams["kubernetes_dir"]),
}
}
@@ -36,7 +36,10 @@ func KubeadmFuzzerFuncs(t apitesting.TestingCommon) []interface{} {
obj.CertificatesDir = "foo"
obj.APIServerCertSANs = []string{}
obj.Token = "foo"
obj.Etcd.Image = "foo"
obj.Etcd.DataDir = "foo"
obj.ImageRepository = "foo"
obj.UnifiedControlPlaneImage = "foo"
},
func(obj *kubeadm.NodeConfiguration, c fuzz.Continue) {
c.FuzzNoCustom(obj)
@@ -23,10 +23,7 @@ import (
)

type EnvParams struct {
KubernetesDir string
HyperkubeImage string
RepositoryPrefix string
EtcdImage string
KubernetesDir string
}

type MasterConfiguration struct {
@@ -37,6 +34,7 @@ type MasterConfiguration struct {
Networking Networking
KubernetesVersion string
CloudProvider string
NodeName string
AuthorizationModes []string

Token string
@@ -55,6 +53,11 @@ type MasterConfiguration struct {
APIServerCertSANs []string
// CertificatesDir specifies where to store or look for all required certificates
CertificatesDir string

// ImageRepository what container registry to pull control plane images from
ImageRepository string
// UnifiedControlPlaneImage specifies if a specific container image should be used for all control plane components
UnifiedControlPlaneImage string
}

type API struct {
@@ -83,6 +86,8 @@ type Etcd struct {
KeyFile string
DataDir string
ExtraArgs map[string]string
// Image specifies which container image to use for running etcd. If empty, automatically populated by kubeadm using the image repository and default etcd version
Image string
}

type NodeConfiguration struct {
@@ -93,6 +98,7 @@ type NodeConfiguration struct {
DiscoveryToken string
// Currently we only pay attention to one api server but hope to support >1 in the future
DiscoveryTokenAPIServers []string
NodeName string
TLSBootstrapToken string
Token string
}
@@ -33,6 +33,7 @@ const (
DefaultCACertPath = "/etc/kubernetes/pki/ca.crt"
DefaultCertificatesDir = "/etc/kubernetes/pki"
DefaultEtcdDataDir = "/var/lib/etcd"
DefaultImageRepository = "gcr.io/google_containers"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -68,6 +69,10 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) {
obj.TokenTTL = constants.DefaultTokenDuration
}

if obj.ImageRepository == "" {
obj.ImageRepository = DefaultImageRepository
}

if obj.Etcd.DataDir == "" {
obj.Etcd.DataDir = DefaultEtcdDataDir
}
@@ -30,6 +30,7 @@ type MasterConfiguration struct {
Networking Networking `json:"networking"`
KubernetesVersion string `json:"kubernetesVersion"`
CloudProvider string `json:"cloudProvider"`
NodeName string `json:"nodeName"`
AuthorizationModes []string `json:"authorizationModes"`

Token string `json:"token"`
@@ -48,6 +49,11 @@ type MasterConfiguration struct {
APIServerCertSANs []string `json:"apiServerCertSANs"`
// CertificatesDir specifies where to store or look for all required certificates
CertificatesDir string `json:"certificatesDir"`

// ImageRepository what container registry to pull control plane images from
ImageRepository string `json:"imageRepository"`
// UnifiedControlPlaneImage specifies if a specific container image should be used for all control plane components
UnifiedControlPlaneImage string `json:"unifiedControlPlaneImage"`
}

type API struct {
@@ -76,6 +82,8 @@ type Etcd struct {
KeyFile string `json:"keyFile"`
DataDir string `json:"dataDir"`
ExtraArgs map[string]string `json:"extraArgs"`
// Image specifies which container image to use for running etcd. If empty, automatically populated by kubeadm using the image repository and default etcd version
Image string `json:"image"`
}

type NodeConfiguration struct {
@@ -85,6 +93,7 @@ type NodeConfiguration struct {
DiscoveryFile string `json:"discoveryFile"`
DiscoveryToken string `json:"discoveryToken"`
DiscoveryTokenAPIServers []string `json:"discoveryTokenAPIServers"`
NodeName string `json:"nodeName"`
TLSBootstrapToken string `json:"tlsBootstrapToken"`
Token string `json:"token"`
}
@@ -15,6 +15,7 @@ go_test(
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)
@@ -30,6 +31,8 @@ go_library(
"//pkg/api/validation:go_default_library",
"//pkg/kubeapiserver/authorizer/modes:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//pkg/util/node:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
@@ -24,6 +24,8 @@ import (
"path/filepath"
"strings"

"github.com/spf13/pflag"

"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
@@ -32,6 +34,7 @@ import (
apivalidation "k8s.io/kubernetes/pkg/api/validation"
authzmodes "k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
"k8s.io/kubernetes/pkg/util/node"
)

// TODO: Break out the cloudprovider functionality out of core and only support the new flow
@@ -41,7 +44,6 @@ var cloudproviders = []string{
"azure",
"cloudstack",
"gce",
"mesos",
"openstack",
"ovirt",
"photon",
@@ -62,6 +64,7 @@ func ValidateMasterConfiguration(c *kubeadm.MasterConfiguration) field.ErrorList
allErrs = append(allErrs, ValidateNetworking(&c.Networking, field.NewPath("networking"))...)
allErrs = append(allErrs, ValidateAPIServerCertSANs(c.APIServerCertSANs, field.NewPath("cert-altnames"))...)
allErrs = append(allErrs, ValidateAbsolutePath(c.CertificatesDir, field.NewPath("certificates-dir"))...)
allErrs = append(allErrs, ValidateNodeName(c.NodeName, field.NewPath("node-name"))...)
allErrs = append(allErrs, ValidateToken(c.Token, field.NewPath("token"))...)
return allErrs
}
@@ -236,6 +239,14 @@ func ValidateAbsolutePath(path string, fldPath *field.Path) field.ErrorList {
return allErrs
}

func ValidateNodeName(nodename string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if node.GetHostname(nodename) != nodename {
allErrs = append(allErrs, field.Invalid(fldPath, nodename, "nodename is not valid, must be lower case"))
}
return allErrs
}

func ValidateCloudProvider(provider string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if len(provider) == 0 {
@@ -249,3 +260,10 @@ func ValidateCloudProvider(provider string, fldPath *field.Path) field.ErrorList
allErrs = append(allErrs, field.Invalid(fldPath, provider, "cloudprovider not supported"))
return allErrs
}

func ValidateMixedArguments(flag *pflag.FlagSet) error {
if flag.Changed("config") && flag.NFlag() != 1 {
return fmt.Errorf("can not mix '--config' with other arguments")
}
return nil
}
@@ -19,6 +19,8 @@ package validation
import (
"testing"

"github.com/spf13/pflag"

"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
)
@@ -76,6 +78,29 @@ func TestValidateAuthorizationModes(t *testing.T) {
}
}

func TestValidateNodeName(t *testing.T) {
var tests = []struct {
s string
f *field.Path
expected bool
}{
{"", nil, false}, // ok if not provided
{"1234", nil, true}, // supported
{"valid-nodename", nil, true}, // supported
{"INVALID-NODENAME", nil, false}, // Upper cases is invalid
}
for _, rt := range tests {
actual := ValidateNodeName(rt.s, rt.f)
if (len(actual) == 0) != rt.expected {
t.Errorf(
"failed ValidateNodeName:\n\texpected: %t\n\t actual: %t",
rt.expected,
(len(actual) == 0),
)
}
}
}

func TestValidateCloudProvider(t *testing.T) {
var tests = []struct {
s string
@@ -175,6 +200,7 @@ func TestValidateIPNetFromString(t *testing.T) {
}

func TestValidateMasterConfiguration(t *testing.T) {
nodename := "valid-nodename"
var tests = []struct {
s *kubeadm.MasterConfiguration
expected bool
@@ -187,6 +213,7 @@ func TestValidateMasterConfiguration(t *testing.T) {
DNSDomain: "cluster.local",
},
CertificatesDir: "/some/cert/dir",
NodeName: nodename,
}, false},
{&kubeadm.MasterConfiguration{
AuthorizationModes: []string{"Node", "RBAC"},
@@ -196,6 +223,7 @@ func TestValidateMasterConfiguration(t *testing.T) {
},
CertificatesDir: "/some/other/cert/dir",
Token: "abcdef.0123456789abcdef",
NodeName: nodename,
}, true},
{&kubeadm.MasterConfiguration{
AuthorizationModes: []string{"Node", "RBAC"},
@@ -204,6 +232,7 @@ func TestValidateMasterConfiguration(t *testing.T) {
DNSDomain: "cluster.local",
},
CertificatesDir: "/some/cert/dir",
NodeName: nodename,
}, false},
{&kubeadm.MasterConfiguration{
AuthorizationModes: []string{"Node", "RBAC"},
@@ -213,6 +242,7 @@ func TestValidateMasterConfiguration(t *testing.T) {
},
CertificatesDir: "/some/other/cert/dir",
Token: "abcdef.0123456789abcdef",
NodeName: nodename,
}, true},
}
for _, rt := range tests {
@@ -250,3 +280,42 @@ func TestValidateNodeConfiguration(t *testing.T) {
}
}
}

func TestValidateMixedArguments(t *testing.T) {
var tests = []struct {
args []string
expected bool
}{
{[]string{"--foo=bar"}, true},
{[]string{"--config=hello"}, true},
{[]string{"--foo=bar", "--config=hello"}, false},
}

var cfgPath string
var skipPreFlight bool

for _, rt := range tests {
f := pflag.NewFlagSet("test", pflag.ContinueOnError)
if f.Parsed() {
t.Error("f.Parse() = true before Parse")
}
f.String("foo", "", "string value")
f.StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file")
f.BoolVar(
&skipPreFlight, "skip-preflight-checks", skipPreFlight,
"Skip preflight checks normally run before modifying the system",
)
if err := f.Parse(rt.args); err != nil {
t.Fatal(err)
}

actual := ValidateMixedArguments(f)
if (actual == nil) != rt.expected {
t.Errorf(
"failed ValidateMixedArguments:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
}
}
@@ -1,43 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubeadm

// Role labels are applied to Nodes to mark their purpose. In particular, we
// usually want to distinguish the master, so that we can isolate privileged
// pods and operations.
//
// Originally we relied on not registering the master, on the fact that the
// master was Unschedulable, and on static manifests for master components.
// But we now do register masters in many environments, are generally moving
// away from static manifests (for better manageability), and working towards
// deprecating the unschedulable field (replacing it with taints & tolerations
// instead).
//
// Even with tainting, a label remains the easiest way of making a positive
// selection, so that pods can schedule only to master nodes for example, and
// thus installations will likely define a label for their master nodes.
//
// So that we can recognize master nodes in consequent places though (such as
// kubectl get nodes), we encourage installations to use the well-known labels.
// We define NodeLabelRole, which is the preferred form, but we will also recognize
// other forms that are known to be in widespread use (NodeLabelKubeadmAlphaRole).

const (
// NodeLabelKubeadmAlphaRole is a label that kubeadm applies to a Node as a hint that it has a particular purpose.
// Use of NodeLabelRole is preferred.
NodeLabelKubeadmAlphaRole = "kubeadm.alpha.kubernetes.io/role"
)
|
||||
srcs = [
|
||||
"cmd.go",
|
||||
"completion.go",
|
||||
"defaults.go",
|
||||
"init.go",
|
||||
"join.go",
|
||||
"reset.go",
|
||||
@@ -28,15 +27,16 @@ go_library(
|
||||
"//cmd/kubeadm/app/cmd/phases:go_default_library",
|
||||
"//cmd/kubeadm/app/constants:go_default_library",
|
||||
"//cmd/kubeadm/app/discovery:go_default_library",
|
||||
"//cmd/kubeadm/app/master:go_default_library",
|
||||
"//cmd/kubeadm/app/node:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/addons:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/apiconfig:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/certs:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/controlplane:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/selfhosting:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/token:go_default_library",
|
||||
"//cmd/kubeadm/app/preflight:go_default_library",
|
||||
"//cmd/kubeadm/app/util:go_default_library",
|
||||
"//cmd/kubeadm/app/util/config:go_default_library",
|
||||
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
|
||||
"//cmd/kubeadm/app/util/token:go_default_library",
|
||||
"//pkg/api:go_default_library",
|
||||
@@ -45,6 +45,7 @@ go_library(
|
||||
"//pkg/printers:go_default_library",
|
||||
"//pkg/util/i18n:go_default_library",
|
||||
"//pkg/util/initsystem:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//vendor/github.com/ghodss/yaml:go_default_library",
|
||||
@@ -54,7 +55,6 @@ go_library(
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
|
||||
"//vendor/k8s.io/apiserver/pkg/util/flag:go_default_library",
|
||||
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
|
||||
@@ -65,7 +65,6 @@ go_library(
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"defaults_test.go",
|
||||
"reset_test.go",
|
||||
"token_test.go",
|
||||
],
|
||||
|
@@ -31,15 +31,17 @@ import (
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
cmdphases "k8s.io/kubernetes/cmd/kubeadm/app/cmd/phases"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
addonsphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/addons"
apiconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
controlplanephase "k8s.io/kubernetes/cmd/kubeadm/app/phases/controlplane"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
selfhostingphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
tokenphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/token"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/util/version"
)
@@ -84,7 +86,13 @@ func NewCmdInit(out io.Writer) *cobra.Command {

i, err := NewInit(cfgPath, internalcfg, skipPreFlight, skipTokenPrint)
kubeadmutil.CheckErr(err)
kubeadmutil.CheckErr(i.Validate())
kubeadmutil.CheckErr(i.Validate(cmd))

// TODO: remove this warning in 1.9
if !cmd.Flags().Lookup("token-ttl").Changed {
fmt.Println("[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)")
}

kubeadmutil.CheckErr(i.Run(out))
},
}
@@ -121,6 +129,10 @@ func NewCmdInit(out io.Writer) *cobra.Command {
&cfg.APIServerCertSANs, "apiserver-cert-extra-sans", cfg.APIServerCertSANs,
`Optional extra altnames to use for the API Server serving cert. Can be both IP addresses and dns names.`,
)
cmd.PersistentFlags().StringVar(
&cfg.NodeName, "node-name", cfg.NodeName,
`Specify the node name`,
)

cmd.PersistentFlags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)")

@@ -132,6 +144,10 @@ func NewCmdInit(out io.Writer) *cobra.Command {
&skipTokenPrint, "skip-token-print", skipTokenPrint,
"Skip printing of the default bootstrap token generated by 'kubeadm init'",
)
cmd.PersistentFlags().BoolVar(
&cfg.SelfHosted, "self-hosted", cfg.SelfHosted,
"[experimental] If kubeadm should make this control plane self-hosted",
)

cmd.PersistentFlags().StringVar(
&cfg.Token, "token", cfg.Token,
@@ -159,11 +175,20 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight,
}

// Set defaults dynamically that the API group defaulting can't (by fetching information from the internet, looking up network interfaces, etc.)
err := setInitDynamicDefaults(cfg)
err := configutil.SetInitDynamicDefaults(cfg)
if err != nil {
return nil, err
}

fmt.Printf("[init] Using Kubernetes version: %s\n", cfg.KubernetesVersion)
fmt.Printf("[init] Using Authorization mode: %v\n", cfg.AuthorizationModes)

// Warn about the limitations with the current cloudprovider solution.
if cfg.CloudProvider != "" {
fmt.Println("[init] WARNING: For cloudprovider integrations to work --cloud-provider must be set for all kubelets in the cluster.")
fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)")
}

if !skipPreFlight {
fmt.Println("[preflight] Running pre-flight checks")

@@ -186,7 +211,10 @@ type Init struct {
}

// Validate validates configuration passed to "kubeadm init"
func (i *Init) Validate() error {
func (i *Init) Validate(cmd *cobra.Command) error {
if err := validation.ValidateMixedArguments(cmd.Flags()); err != nil {
return err
}
return validation.ValidateMasterConfiguration(i.cfg).ToAggregate()
}

@@ -194,7 +222,7 @@ func (i *Init) Validate() error {
func (i *Init) Run(out io.Writer) error {

// PHASE 1: Generate certificates
err := certphase.CreatePKIAssets(i.cfg)
err := cmdphases.CreatePKIAssets(i.cfg)
if err != nil {
return err
}
@@ -202,36 +230,26 @@ func (i *Init) Run(out io.Writer) error {
// PHASE 2: Generate kubeconfig files for the admin and the kubelet

masterEndpoint := fmt.Sprintf("https://%s:%d", i.cfg.API.AdvertiseAddress, i.cfg.API.BindPort)
err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmapi.GlobalEnvParams.KubernetesDir)
err = kubeconfigphase.CreateInitKubeConfigFiles(masterEndpoint, i.cfg.CertificatesDir, kubeadmapi.GlobalEnvParams.KubernetesDir, i.cfg.NodeName)
if err != nil {
return err
}

// PHASE 3: Bootstrap the control plane
if err := kubemaster.WriteStaticPodManifests(i.cfg); err != nil {
if err := controlplanephase.WriteStaticPodManifests(i.cfg); err != nil {
return err
}

adminKubeConfigPath := filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AdminKubeConfigFileName)
client, err := kubemaster.CreateClientAndWaitForAPI(adminKubeConfigPath)
client, err := kubeadmutil.CreateClientAndWaitForAPI(adminKubeConfigPath)
if err != nil {
return err
}

if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client); err != nil {
if err := apiconfigphase.UpdateMasterRoleLabelsAndTaints(client, i.cfg.NodeName); err != nil {
return err
}

// Is deployment type self-hosted?
if i.cfg.SelfHosted {
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[self-hosted] Creating self-hosted control plane...")
if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
return err
}
}

// PHASE 4: Set up the bootstrap tokens
if !i.skipTokenPrint {
fmt.Printf("[token] Using token: %s\n", i.cfg.Token)
@@ -268,6 +286,16 @@ func (i *Init) Run(out io.Writer) error {
return err
}

// Is deployment type self-hosted?
if i.cfg.SelfHosted {
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[self-hosted] Creating self-hosted control plane...")
if err := selfhostingphase.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
return err
}
}

ctx := map[string]string{
"KubeConfigPath": filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.AdminKubeConfigFileName),
"KubeConfigName": kubeadmconstants.AdminKubeConfigFileName,
@@ -20,7 +20,6 @@ import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"

"github.com/renstrom/dedent"
@@ -33,11 +32,12 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
kubenode "k8s.io/kubernetes/cmd/kubeadm/app/node"
kubeadmnode "k8s.io/kubernetes/cmd/kubeadm/app/node"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/pkg/api"
nodeutil "k8s.io/kubernetes/pkg/util/node"
)

var (
@@ -63,26 +63,26 @@ func NewCmdJoin(out io.Writer) *cobra.Command {
Use: "join <flags> [DiscoveryTokenAPIServers]",
Short: "Run this on any machine you wish to join an existing cluster",
Long: dedent.Dedent(`
When joining a kubeadm initialized cluster, we need to establish
bidirectional trust. This is split into discovery (having the Node
trust the Kubernetes Master) and TLS bootstrap (having the Kubernetes
When joining a kubeadm initialized cluster, we need to establish
bidirectional trust. This is split into discovery (having the Node
trust the Kubernetes Master) and TLS bootstrap (having the Kubernetes
Master trust the Node).

There are 2 main schemes for discovery. The first is to use a shared
token along with the IP address of the API server. The second is to
provide a file (a subset of the standard kubeconfig file). This file
can be a local file or downloaded via an HTTPS URL. The forms are
kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443,
There are 2 main schemes for discovery. The first is to use a shared
token along with the IP address of the API server. The second is to
provide a file (a subset of the standard kubeconfig file). This file
can be a local file or downloaded via an HTTPS URL. The forms are
kubeadm join --discovery-token abcdef.1234567890abcdef 1.2.3.4:6443,
kubeadm join --discovery-file path/to/file.conf, or kubeadm join
--discovery-file https://url/file.conf. Only one form can be used. If
the discovery information is loaded from a URL, HTTPS must be used and
--discovery-file https://url/file.conf. Only one form can be used. If
the discovery information is loaded from a URL, HTTPS must be used and
the host installed CA bundle is used to verify the connection.

The TLS bootstrap mechanism is also driven via a shared token. This is
The TLS bootstrap mechanism is also driven via a shared token. This is
used to temporarily authenticate with the Kubernetes Master to submit a
certificate signing request (CSR) for a locally created key pair. By
default kubeadm will set up the Kubernetes Master to automatically
approve these signing requests. This token is passed in with the
certificate signing request (CSR) for a locally created key pair. By
default kubeadm will set up the Kubernetes Master to automatically
approve these signing requests. This token is passed in with the
--tls-bootstrap-token abcdef.1234567890abcdef flag.

Often times the same token is used for both parts. In this case, the
@@ -97,7 +97,7 @@ func NewCmdJoin(out io.Writer) *cobra.Command {

j, err := NewJoin(cfgPath, args, internalcfg, skipPreFlight)
kubeadmutil.CheckErr(err)
kubeadmutil.CheckErr(j.Validate())
kubeadmutil.CheckErr(j.Validate(cmd))
kubeadmutil.CheckErr(j.Run(out))
},
}
@@ -112,6 +112,9 @@ func NewCmdJoin(out io.Writer) *cobra.Command {
cmd.PersistentFlags().StringVar(
&cfg.DiscoveryToken, "discovery-token", "",
"A token used to validate cluster information fetched from the master")
cmd.PersistentFlags().StringVar(
&cfg.NodeName, "node-name", "",
"Specify the node name")
cmd.PersistentFlags().StringVar(
&cfg.TLSBootstrapToken, "tls-bootstrap-token", "",
"A token used for TLS bootstrapping")
@@ -161,7 +164,10 @@ func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, s
return &Join{cfg: cfg}, nil
}

func (j *Join) Validate() error {
func (j *Join) Validate(cmd *cobra.Command) error {
if err := validation.ValidateMixedArguments(cmd.PersistentFlags()); err != nil {
return err
}
return validation.ValidateNodeConfiguration(j.cfg).ToAggregate()
}

@@ -172,18 +178,16 @@ func (j *Join) Run(out io.Writer) error {
return err
}

hostname, err := os.Hostname()
if err != nil {
return err
}
hostname := nodeutil.GetHostname(j.cfg.NodeName)

client, err := kubeconfigutil.KubeConfigToClientSet(cfg)
if err != nil {
return err
}
if err := kubenode.ValidateAPIServer(client); err != nil {
if err := kubeadmnode.ValidateAPIServer(client); err != nil {
return err
}
if err := kubenode.PerformTLSBootstrap(cfg, hostname); err != nil {
if err := kubeadmnode.PerformTLSBootstrap(cfg, hostname); err != nil {
return err
}
@@ -5,6 +5,7 @@ licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)

go_library(
@@ -14,20 +15,38 @@ go_library(
"kubeconfig.go",
"phase.go",
"preflight.go",
"selfhosting.go",
],
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
"//cmd/kubeadm/app/phases/selfhosting:go_default_library",
"//cmd/kubeadm/app/preflight:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/config:go_default_library",
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//pkg/api:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
],
)

go_test(
name = "go_default_test",
srcs = ["certs_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm/install:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
"//vendor/github.com/renstrom/dedent:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
],
)
@@ -17,18 +17,20 @@ limitations under the License.
package phases

import (
"crypto/rsa"
"crypto/x509"
"fmt"
"net"

"github.com/spf13/cobra"

netutil "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/validation/field"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
"k8s.io/kubernetes/pkg/api"
)
@@ -40,63 +42,342 @@ func NewCmdCerts() *cobra.Command {
|
||||
RunE: subCmdRunE("certs"),
|
||||
}
|
||||
|
||||
cmd.AddCommand(NewCmdSelfSign())
|
||||
cmd.AddCommand(newSubCmdCerts()...)
|
||||
return cmd
|
||||
}

func NewCmdSelfSign() *cobra.Command {
// TODO: Move this into a dedicated Certificates Phase API object
// newSubCmdCerts returns sub commands for certs phase
func newSubCmdCerts() []*cobra.Command {

	cfg := &kubeadmapiext.MasterConfiguration{}
	// Default values for the cobra help text
	api.Scheme.Default(cfg)

	cmd := &cobra.Command{
		Use:   "selfsign",
		Short: "Generate the CA, APIServer signing/client cert, the ServiceAccount public/private keys and a CA and client cert for the front proxy",
		Run: func(cmd *cobra.Command, args []string) {
	var cfgPath string
	var subCmds []*cobra.Command

			// Run the defaulting once again to take passed flags into account
			api.Scheme.Default(cfg)
			internalcfg := &kubeadmapi.MasterConfiguration{}
			api.Scheme.Convert(cfg, internalcfg, nil)

			err := RunSelfSign(internalcfg)
			kubeadmutil.CheckErr(err)
	subCmdProperties := []struct {
		use     string
		short   string
		cmdFunc func(cfg *kubeadmapi.MasterConfiguration) error
	}{
		{
			use:     "all",
			short:   "Generate all PKI assets necessary to establish the control plane",
			cmdFunc: CreatePKIAssets,
		},
		{
			use:     "ca",
			short:   "Generate CA certificate and key for a Kubernetes cluster.",
			cmdFunc: createOrUseCACertAndKey,
		},
		{
			use:     "apiserver",
			short:   "Generate API Server serving certificate and key.",
			cmdFunc: createOrUseAPIServerCertAndKey,
		},
		{
			use:     "apiserver-kubelet-client",
			short:   "Generate a client certificate for the API Server to connect to the kubelets securely.",
			cmdFunc: createOrUseAPIServerKubeletClientCertAndKey,
		},
		{
			use:     "sa",
			short:   "Generate a private key for signing service account tokens along with its public key.",
			cmdFunc: createOrUseServiceAccountKeyAndPublicKey,
		},
		{
			use:     "front-proxy-ca",
			short:   "Generate front proxy CA certificate and key for a Kubernetes cluster.",
			cmdFunc: createOrUseFrontProxyCACertAndKey,
		},
		{
			use:     "front-proxy-client",
			short:   "Generate front proxy CA client certificate and key for a Kubernetes cluster.",
			cmdFunc: createOrUseFrontProxyClientCertAndKey,
		},
	}
	cmd.Flags().StringVar(&cfg.Networking.DNSDomain, "dns-domain", cfg.Networking.DNSDomain, "The DNS Domain for the Kubernetes cluster.")
	cmd.Flags().StringVar(&cfg.CertificatesDir, "cert-dir", cfg.CertificatesDir, "The path where to save and store the certificates.")
	cmd.Flags().StringVar(&cfg.Networking.ServiceSubnet, "service-cidr", cfg.Networking.ServiceSubnet, "The subnet for the Services in the cluster.")
	cmd.Flags().StringSliceVar(&cfg.APIServerCertSANs, "cert-altnames", []string{}, "Optional extra altnames to use for the API Server serving cert. Can be both IP addresses and dns names.")
	cmd.Flags().StringVar(&cfg.API.AdvertiseAddress, "apiserver-advertise-address", cfg.API.AdvertiseAddress, "The IP address the API Server will advertise it's listening on. 0.0.0.0 means the default network interface's address.")

	return cmd
	for _, properties := range subCmdProperties {
		// Creates the UX Command
		cmd := &cobra.Command{
			Use:   properties.use,
			Short: properties.short,
			Run:   runCmdFunc(properties.cmdFunc, &cfgPath, cfg),
		}

		// Add flags to the command
		cmd.Flags().StringVar(&cfgPath, "config", cfgPath, "Path to kubeadm config file (WARNING: Usage of a configuration file is experimental)")
		cmd.Flags().StringVar(&cfg.CertificatesDir, "cert-dir", cfg.CertificatesDir, "The path where to save and store the certificates")
		if properties.use == "all" || properties.use == "apiserver" {
			cmd.Flags().StringVar(&cfg.Networking.DNSDomain, "service-dns-domain", cfg.Networking.DNSDomain, "Use alternative domain for services, e.g. \"myorg.internal\"")
			cmd.Flags().StringVar(&cfg.Networking.ServiceSubnet, "service-cidr", cfg.Networking.ServiceSubnet, "Use alternative range of IP address for service VIPs")
			cmd.Flags().StringSliceVar(&cfg.APIServerCertSANs, "apiserver-cert-extra-sans", []string{}, "Optional extra altnames to use for the API Server serving cert. Can be both IP addresses and dns names.")
			cmd.Flags().StringVar(&cfg.API.AdvertiseAddress, "apiserver-advertise-address", cfg.API.AdvertiseAddress, "The IP address the API Server will advertise it's listening on. 0.0.0.0 means the default network interface's address.")
		}

		subCmds = append(subCmds, cmd)
	}

	return subCmds
}

// RunSelfSign generates certificate assets in the specified directory
func RunSelfSign(config *kubeadmapi.MasterConfiguration) error {
	if err := validateArgs(config); err != nil {
		return fmt.Errorf("The argument validation failed: %v", err)
// runCmdFunc creates a cobra.Command Run function, by composing the call to the given cmdFunc with the necessary additional steps (e.g. preparation of input parameters)
func runCmdFunc(cmdFunc func(cfg *kubeadmapi.MasterConfiguration) error, cfgPath *string, cfg *kubeadmapiext.MasterConfiguration) func(cmd *cobra.Command, args []string) {

	// The following statement builds a closure that wraps a call to a CreateCertFunc, binding
	// the function itself to the specific parameters of each sub command.
	// Please note that sub-command-specific parameters should be passed by value, while the other parameters - passed by reference -
	// are shared between sub commands and are read at their current value, e.g. flag values.

	return func(cmd *cobra.Command, args []string) {
		internalcfg := &kubeadmapi.MasterConfiguration{}

		// Takes passed flags into account; the defaulting is executed once again, enforcing assignment of
		// static default values to cfg only for values not provided with flags
		api.Scheme.Default(cfg)
		api.Scheme.Convert(cfg, internalcfg, nil)

		// Loads configuration from config file, if provided
		// Nb. --config overrides command line flags
		err := configutil.TryLoadMasterConfiguration(*cfgPath, internalcfg)
		kubeadmutil.CheckErr(err)

		// Applies dynamic defaults to settings not provided with flags
		err = configutil.SetInitDynamicDefaults(internalcfg)
		kubeadmutil.CheckErr(err)

		// Validates cfg (flags/configs + defaults + dynamic defaults)
		err = validation.ValidateMasterConfiguration(internalcfg).ToAggregate()
		kubeadmutil.CheckErr(err)

		// Execute the cmdFunc
		err = cmdFunc(internalcfg)
		kubeadmutil.CheckErr(err)
	}
}
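
A minimal sketch of the closure pattern runCmdFunc relies on, using only the standard library: a factory binds a per-sub-command work function to shared, pointer-typed configuration, and the returned closure reads the shared values only when it runs. The names here (config, makeRunFunc, workFunc) are illustrative and not kubeadm APIs.

package main

import "fmt"

// config stands in for the kubeadm MasterConfiguration in this sketch.
type config struct {
	CertificatesDir string
}

// runFunc mirrors the shape of a cobra Run function.
type runFunc func(args []string)

// makeRunFunc binds workFunc (specific to one sub-command, passed by value)
// to cfgPath and cfg (shared across sub-commands, passed by reference), and
// returns the closure that would eventually be wired into a command.
func makeRunFunc(workFunc func(cfg *config) error, cfgPath *string, cfg *config) runFunc {
	return func(args []string) {
		// The shared values are read here, at execution time, so they reflect
		// any flag parsing that happened after the closure was created.
		fmt.Printf("using config file %q and cert dir %q\n", *cfgPath, cfg.CertificatesDir)
		if err := workFunc(cfg); err != nil {
			fmt.Println("error:", err)
		}
	}
}

func main() {
	cfgPath := ""
	cfg := &config{}

	run := makeRunFunc(func(cfg *config) error {
		fmt.Println("creating certificates in", cfg.CertificatesDir)
		return nil
	}, &cfgPath, cfg)

	// Simulate flags being parsed after the closure was built.
	cfgPath = "/etc/kubeadm/config.yaml"
	cfg.CertificatesDir = "/etc/kubernetes/pki"

	run(nil)
}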

// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane.
// Please note that this is a bulk action calling all the atomic certphase actions.
func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) error {

	certActions := []func(cfg *kubeadmapi.MasterConfiguration) error{
		createOrUseCACertAndKey,
		createOrUseAPIServerCertAndKey,
		createOrUseAPIServerKubeletClientCertAndKey,
		createOrUseServiceAccountKeyAndPublicKey,
		createOrUseFrontProxyCACertAndKey,
		createOrUseFrontProxyClientCertAndKey,
	}

	// If it's possible to detect the default IP, add it to the SANs as well. Otherwise, just go with the provided ones
	ip, err := netutil.ChooseBindAddress(net.ParseIP(config.API.AdvertiseAddress))
	if err == nil {
		config.API.AdvertiseAddress = ip.String()
	for _, action := range certActions {
		err := action(cfg)
		if err != nil {
			return err
		}
	}

	if err = certphase.CreatePKIAssets(config); err != nil {
		return err
	fmt.Printf("[certificates] Valid certificates and keys now exist in %q\n", cfg.CertificatesDir)

	return nil
}
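
A small self-contained sketch of the bulk-action flow used by CreatePKIAssets: the atomic steps share one signature, are collected in a slice, and run in order until the first failure. The step names and the clusterConfig type are placeholders, not kubeadm types.

package main

import (
	"errors"
	"fmt"
)

type clusterConfig struct{ CertificatesDir string }

// runAll executes the given steps in order and stops at the first error,
// the same control flow as the certActions loop above.
func runAll(cfg *clusterConfig, steps []func(*clusterConfig) error) error {
	for _, step := range steps {
		if err := step(cfg); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	cfg := &clusterConfig{CertificatesDir: "/etc/kubernetes/pki"}
	steps := []func(*clusterConfig) error{
		func(c *clusterConfig) error { fmt.Println("step: CA"); return nil },
		func(c *clusterConfig) error { fmt.Println("step: apiserver"); return nil },
		func(c *clusterConfig) error { return errors.New("simulated failure") },
	}
	if err := runAll(cfg, steps); err != nil {
		fmt.Println("aborted:", err)
	}
}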

// createOrUseCACertAndKey creates a new self-signed CA, or uses the existing one.
func createOrUseCACertAndKey(cfg *kubeadmapi.MasterConfiguration) error {

	return createOrUseCertificateAuthorithy(
		cfg.CertificatesDir,
		kubeadmconstants.CACertAndKeyBaseName,
		"CA",
		certphase.NewCACertAndKey,
	)
}

// createOrUseAPIServerCertAndKey creates a new serving certificate and key for the API server, or uses the existing ones.
// It assumes the CA certificate and key already exist in the CertificatesDir
func createOrUseAPIServerCertAndKey(cfg *kubeadmapi.MasterConfiguration) error {

	return createOrUseSignedCertificate(
		cfg.CertificatesDir,
		kubeadmconstants.CACertAndKeyBaseName,
		kubeadmconstants.APIServerCertAndKeyBaseName,
		"API server",
		func(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error) {
			return certphase.NewAPIServerCertAndKey(cfg, caCert, caKey)
		},
	)
}

// createOrUseAPIServerKubeletClientCertAndKey creates a new client certificate for the API server to connect to the kubelets, or uses the existing one.
// It assumes the CA certificate and key already exist in the CertificatesDir
func createOrUseAPIServerKubeletClientCertAndKey(cfg *kubeadmapi.MasterConfiguration) error {

	return createOrUseSignedCertificate(
		cfg.CertificatesDir,
		kubeadmconstants.CACertAndKeyBaseName,
		kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName,
		"API server kubelet client",
		certphase.NewAPIServerKubeletClientCertAndKey,
	)
}

// createOrUseServiceAccountKeyAndPublicKey creates a new public/private key pair for signing service account tokens, or uses the existing one.
func createOrUseServiceAccountKeyAndPublicKey(cfg *kubeadmapi.MasterConfiguration) error {

	return createOrUseKeyAndPublicKey(
		cfg.CertificatesDir,
		kubeadmconstants.ServiceAccountKeyBaseName,
		"service account",
		certphase.NewServiceAccountSigningKey,
	)
}

// createOrUseFrontProxyCACertAndKey creates a new self-signed front proxy CA, or uses the existing one.
func createOrUseFrontProxyCACertAndKey(cfg *kubeadmapi.MasterConfiguration) error {

	return createOrUseCertificateAuthorithy(
		cfg.CertificatesDir,
		kubeadmconstants.FrontProxyCACertAndKeyBaseName,
		"front-proxy CA",
		certphase.NewFrontProxyCACertAndKey,
	)
}

// createOrUseFrontProxyClientCertAndKey creates a new client certificate for the front proxy, or uses the existing one.
// It assumes the front proxy CA certificate and key already exist in the CertificatesDir
func createOrUseFrontProxyClientCertAndKey(cfg *kubeadmapi.MasterConfiguration) error {

	return createOrUseSignedCertificate(
		cfg.CertificatesDir,
		kubeadmconstants.FrontProxyCACertAndKeyBaseName,
		kubeadmconstants.FrontProxyClientCertAndKeyBaseName,
		"front-proxy client",
		certphase.NewFrontProxyClientCertAndKey,
	)
}

// createOrUseCertificateAuthorithy is a generic function that will create a new certificate authority using the given newFunc,
// assign file names according to the given baseName, or use the existing one already present in pkiDir.
func createOrUseCertificateAuthorithy(pkiDir string, baseName string, UXName string, newFunc func() (*x509.Certificate, *rsa.PrivateKey, error)) error {

	// If cert or key exists, we should try to load them
	if pkiutil.CertOrKeyExist(pkiDir, baseName) {

		// Try to load .crt and .key from the PKI directory
		caCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName)
		if err != nil {
			return fmt.Errorf("failure loading %s certificate: %v", UXName, err)
		}

		// Check if the existing cert is a CA
		if !caCert.IsCA {
			return fmt.Errorf("certificate %s is not a CA", UXName)
		}

		fmt.Printf("[certificates] Using the existing %s certificate and key.\n", UXName)
	} else {
		// The certificate and the key did NOT exist, let's generate them now
		caCert, caKey, err := newFunc()
		if err != nil {
			return fmt.Errorf("failure while generating %s certificate and key: %v", UXName, err)
		}

		// Write .crt and .key files to disk
		if err = pkiutil.WriteCertAndKey(pkiDir, baseName, caCert, caKey); err != nil {
			return fmt.Errorf("failure while saving %s certificate and key: %v", UXName, err)
		}

		fmt.Printf("[certificates] Generated %s certificate and key.\n", UXName)
	}
	return nil
}
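
For readers outside the kubeadm tree, a simplified standalone sketch of the load-or-create flow above, using only the Go standard library instead of the pkiutil/certphase helpers; the file layout, key size, and validity period here are assumptions for illustration.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"errors"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"time"
)

// loadOrCreateCA returns the CA certificate stored as <dir>/<name>.crt if it
// exists (refusing non-CA certificates), and otherwise generates a new
// self-signed CA, writing <name>.crt and <name>.key to dir.
func loadOrCreateCA(dir, name string) (*x509.Certificate, error) {
	certPath := filepath.Join(dir, name+".crt")

	if data, err := os.ReadFile(certPath); err == nil {
		block, _ := pem.Decode(data)
		if block == nil {
			return nil, errors.New("no PEM data found in " + certPath)
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, err
		}
		if !cert.IsCA {
			return nil, fmt.Errorf("existing certificate %s is not a CA", certPath)
		}
		fmt.Printf("using the existing %s CA\n", name)
		return cert, nil
	}

	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, err
	}
	template := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: name},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		IsCA:                  true,
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, template, template, &key.PublicKey, key)
	if err != nil {
		return nil, err
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	if err := os.WriteFile(certPath, certPEM, 0644); err != nil {
		return nil, err
	}
	if err := os.WriteFile(filepath.Join(dir, name+".key"), keyPEM, 0600); err != nil {
		return nil, err
	}
	fmt.Printf("generated a new self-signed %s CA\n", name)
	return x509.ParseCertificate(der)
}

func main() {
	if _, err := loadOrCreateCA(os.TempDir(), "demo-ca"); err != nil {
		fmt.Println("error:", err)
	}
}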

func validateArgs(config *kubeadmapi.MasterConfiguration) error {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, validation.ValidateNetworking(&config.Networking, field.NewPath("networking"))...)
	allErrs = append(allErrs, validation.ValidateAbsolutePath(config.CertificatesDir, field.NewPath("cert-dir"))...)
	allErrs = append(allErrs, validation.ValidateAPIServerCertSANs(config.APIServerCertSANs, field.NewPath("cert-altnames"))...)
	allErrs = append(allErrs, validation.ValidateIPFromString(config.API.AdvertiseAddress, field.NewPath("apiserver-advertise-address"))...)
// createOrUseSignedCertificate is a generic function that will create a new signed certificate using the given newFunc,
// assign file names according to the given baseName, or use the existing one already present in pkiDir.
func createOrUseSignedCertificate(pkiDir string, CABaseName string, baseName string, UXName string, newFunc func(*x509.Certificate, *rsa.PrivateKey) (*x509.Certificate, *rsa.PrivateKey, error)) error {

	return allErrs.ToAggregate()
	// Checks if the certificate authority exists in the PKI directory
	if !pkiutil.CertOrKeyExist(pkiDir, CABaseName) {
		return fmt.Errorf("couldn't load certificate authority for %s from certificate dir", UXName)
	}

	// Try to load the certificate authority's .crt and .key from the PKI directory
	caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, CABaseName)
	if err != nil {
		return fmt.Errorf("failure loading certificate authority for %s: %v", UXName, err)
	}

	// Make sure the loaded CA cert actually is a CA
	if !caCert.IsCA {
		return fmt.Errorf("certificate authority for %s is not a CA", UXName)
	}

	// Checks if the signed certificate exists in the PKI directory
	if pkiutil.CertOrKeyExist(pkiDir, baseName) {
		// Try to load signed certificate .crt and .key from the PKI directory
		signedCert, _, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, baseName)
		if err != nil {
			return fmt.Errorf("failure loading %s certificate: %v", UXName, err)
		}

		// Check if the existing cert is signed by the given CA
		if err := signedCert.CheckSignatureFrom(caCert); err != nil {
			return fmt.Errorf("certificate %s is not signed by corresponding CA", UXName)
		}

		fmt.Printf("[certificates] Using the existing %s certificate and key.\n", UXName)
	} else {
		// The certificate and the key did NOT exist, let's generate them now
		signedCert, signedKey, err := newFunc(caCert, caKey)
		if err != nil {
			return fmt.Errorf("failure while generating %s key and certificate: %v", UXName, err)
		}

		// Write .crt and .key files to disk
		if err = pkiutil.WriteCertAndKey(pkiDir, baseName, signedCert, signedKey); err != nil {
			return fmt.Errorf("failure while saving %s certificate and key: %v", UXName, err)
		}

		fmt.Printf("[certificates] Generated %s certificate and key.\n", UXName)
		if pkiutil.HasServerAuth(signedCert) {
			fmt.Printf("[certificates] %s serving cert is signed for DNS names %v and IPs %v\n", UXName, signedCert.DNSNames, signedCert.IPAddresses)
		}
	}

	return nil
}
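
A compact standalone sketch of the two CA checks performed above: the loaded CA file must really be a CA, and an existing certificate must verify against it via CheckSignatureFrom. It uses only the standard library; the file paths in main are placeholders.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

// readCert loads a single PEM-encoded certificate from path.
func readCert(path string) (*x509.Certificate, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(data)
	if block == nil {
		return nil, fmt.Errorf("no PEM block found in %s", path)
	}
	return x509.ParseCertificate(block.Bytes)
}

// checkSignedBy reproduces the checks above: the CA file must really be a CA,
// and the existing leaf certificate must carry a signature from that CA.
func checkSignedBy(caPath, certPath string) error {
	caCert, err := readCert(caPath)
	if err != nil {
		return err
	}
	if !caCert.IsCA {
		return fmt.Errorf("%s is not a CA certificate", caPath)
	}
	leaf, err := readCert(certPath)
	if err != nil {
		return err
	}
	if err := leaf.CheckSignatureFrom(caCert); err != nil {
		return fmt.Errorf("%s is not signed by %s: %v", certPath, caPath, err)
	}
	return nil
}

func main() {
	// Paths are placeholders; point them at a real CA and leaf cert to try it.
	if err := checkSignedBy("/etc/kubernetes/pki/ca.crt", "/etc/kubernetes/pki/apiserver.crt"); err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("certificate is signed by the given CA")
}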

// createOrUseKeyAndPublicKey is a generic function that will create a new public/private key pair using the given newFunc,
// assign file names according to the given baseName, or use the existing one already present in pkiDir.
func createOrUseKeyAndPublicKey(pkiDir string, baseName string, UXName string, newFunc func() (*rsa.PrivateKey, error)) error {

	// Checks if the key exists in the PKI directory
	if pkiutil.CertOrKeyExist(pkiDir, baseName) {

		// Try to load .key from the PKI directory
		_, err := pkiutil.TryLoadKeyFromDisk(pkiDir, baseName)
		if err != nil {
			return fmt.Errorf("%s key existed but it could not be loaded properly: %v", UXName, err)
		}

		fmt.Printf("[certificates] Using the existing %s key.\n", UXName)
	} else {
		// The key does NOT exist, let's generate it now
		key, err := newFunc()
		if err != nil {
			return fmt.Errorf("failure while generating %s key: %v", UXName, err)
		}

		// Write .key and .pub files to disk
		if err = pkiutil.WriteKey(pkiDir, baseName, key); err != nil {
			return fmt.Errorf("failure while saving %s key: %v", UXName, err)
		}

		if err = pkiutil.WritePublicKey(pkiDir, baseName, &key.PublicKey); err != nil {
			return fmt.Errorf("failure while saving %s public key: %v", UXName, err)
		}
		fmt.Printf("[certificates] Generated %s key and public key.\n", UXName)
	}

	return nil
}
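
A simplified standalone sketch of the .key/.pub layout written above, using only the standard library in place of pkiutil.WriteKey and pkiutil.WritePublicKey; the PEM block types and file modes are assumptions for illustration.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"path/filepath"
)

// writeKeyPair writes <name>.key (private key) and <name>.pub (public key)
// to dir, mirroring the two files the service account phase produces.
func writeKeyPair(dir, name string, key *rsa.PrivateKey) error {
	keyPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	pubDER, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		return err
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubDER})

	if err := os.WriteFile(filepath.Join(dir, name+".key"), keyPEM, 0600); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, name+".pub"), pubPEM, 0644)
}

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if err := writeKeyPair(os.TempDir(), "sa", key); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("wrote sa.key and sa.pub to", os.TempDir())
}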